Apollo中三種相機外參的可視化分析
- 一、什么是相機外參?為什么需要可視化?
- 二、不同外參來源對比
- 三、詳細操作步驟
- 1. 環境準備
- 2. 獲取 `NuScenes`外參數據
- 3. 外參到空間位置的轉換及可視化
- 四、可視化對比
- 1. `NuScenes`數據集外參
- 2. Apollo BEV模型外參
- 3. Apollo園區版外參
- 五、關鍵結論與應用
一、什么是相機外參?為什么需要可視化?
在自動駕駛系統中,相機外參描述了相機在車輛坐標系中的位置和朝向。它包含兩個關鍵信息:
- 位置:相機相對于車輛中心(通常是激光雷達位置)的坐標 (x, y, z)
- 朝向:相機的旋轉角度(通常用四元數表示)
可視化相機外參的重要性在于:
- 驗證標定質量:直觀檢查相機位置和朝向是否符合物理布局
- 檢測標定錯誤:發現位置偏移或方向錯誤等重大問題
- 理解感知系統:幫助理解不同相機視角的覆蓋范圍
- 多傳感器融合:確保相機和激光雷達的空間對齊關系正確
二、不同外參來源對比
本次分析對比了三種不同來源的外參數據:
- NuScenes數據集外參
  - 來源:公開數據集 v1.0-mini
  - 特點:標準車輛坐標系,相機布局規範
- Apollo BEV模型自帶外參
  - 來源:camera_detection_bev 模型
  - 特點:針對特定感知模型優化
- Apollo園區版外參
  - 來源:nuscenes_165 校準數據
  - 特點:Apollo實際部署使用的參數【懷疑是非真實的】
三、詳細操作步驟
1. 環境準備
nuscenes-devkit 1.1.11 # NuScenes數據集解析工具
numpy 1.26.0
opencv-contrib-python 4.12.0.88
opencv-python 4.9.0.80
opencv-python-headless 4.9.0.80
2. 獲取 NuScenes 外參數據
cat > get_nuscenes_extrinsics.py <<-'EOF'
import numpy as np
from nuscenes.nuscenes import NuScenes


def get_nuscenes_extrinsics(nusc, sample_token):
    """Print each camera's calibrated extrinsics for one NuScenes sample.

    For each of the six cameras, look up the `calibrated_sensor` record and
    print its rotation quaternion (w, x, y, z) and translation (x, y, z) as
    one row of a Python-literal list. The function only prints; it returns
    nothing.

    Args:
        nusc: an initialized NuScenes instance.
        sample_token: token of the sample whose camera calibration to dump.
    """
    sample = nusc.get('sample', sample_token)
    camera_channels = ["CAM_FRONT", "CAM_FRONT_RIGHT", "CAM_BACK_RIGHT",
                       "CAM_BACK", "CAM_BACK_LEFT", "CAM_FRONT_LEFT"]
    print("相機數據 (名稱, 四元數(w,x,y,z), 位置(x,y,z))")
    print("[")
    for channel in camera_channels:
        camera_data = nusc.get('sample_data', sample['data'][channel])
        calib_sensor = nusc.get('calibrated_sensor', camera_data['calibrated_sensor_token'])
        rotation = np.array(calib_sensor['rotation'])
        trans = np.array(calib_sensor['translation'])
        # NOTE: trans[0] uses fixed-point (:.4f) while every other field uses
        # scientific notation; this matches the sample output shown below.
        print(f"[\"{channel:16s}\",[{rotation[0]:>7.4e},{rotation[1]:>7.4e},{rotation[2]:>7.4e},{rotation[3]:>7.4e}],[{trans[0]:>7.4f},{trans[1]:>7.4e},{trans[2]:>7.4e}]],")
    print("]")


dataroot = './'  # replace with your dataset path
nusc = NuScenes(version='v1.0-mini', dataroot=dataroot, verbose=False)
sample_token = nusc.sample[0]['token']
get_nuscenes_extrinsics(nusc, sample_token)
EOF
# 執行腳本(需提前下載數據集)
python get_nuscenes_extrinsics.py
關鍵步驟解釋:
- 連接NuScenes數據庫獲取標定數據
- 提取6個相機的四元數旋轉參數和平移向量
- 格式化輸出外參矩陣(位置+旋轉)
輸出
相機數據 (名稱, 四元數(w,x,y,z), 位置(x,y,z))
[
["CAM_FRONT ",[4.9980e-01,-5.0303e-01,4.9978e-01,-4.9737e-01],[ 1.7008,1.5946e-02,1.5110e+00]],
["CAM_FRONT_RIGHT ",[2.0603e-01,-2.0269e-01,6.8245e-01,-6.7136e-01],[ 1.5508,-4.9340e-01,1.4957e+00]],
["CAM_BACK_RIGHT ",[1.2281e-01,-1.3240e-01,-7.0043e-01,6.9050e-01],[ 1.0149,-4.8057e-01,1.5624e+00]],
["CAM_BACK ",[5.0379e-01,-4.9740e-01,-4.9419e-01,5.0455e-01],[ 0.0283,3.4514e-03,1.5791e+00]],
["CAM_BACK_LEFT ",[6.9242e-01,-7.0316e-01,-1.1648e-01,1.1203e-01],[ 1.0357,4.8480e-01,1.5910e+00]],
["CAM_FRONT_LEFT ",[6.7573e-01,-6.7363e-01,2.1214e-01,-2.1123e-01],[ 1.5239,4.9463e-01,1.5093e+00]],
]
3. 外參到空間位置的轉換及可視化
cat > infer_camera_pos_by_extrinsics.py <<-'EOF'
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d, Axes3D
import json
from scipy.spatial.transform import Rotation as R
from pyquaternion import Quaternion
from collections import OrderedDict
import yaml# 自定義3D箭頭類
class Arrow3D(FancyArrowPatch):def __init__(self, xs, ys, zs, *args, **kwargs):super().__init__((0,0), (0,0), *args, **kwargs)self._verts3d = xs, ys, zsdef do_3d_projection(self, renderer=None):xs3d, ys3d, zs3d = self._verts3dxs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))return min(zs)# 四元數轉旋轉矩陣函數
def quaternion_to_rotation_matrix(translation, rotation):"""1.輸入是從相機坐標系到車輛坐標系將四元數轉換為3x3旋轉矩陣,并調整平移部分"""q = Quaternion(rotation) # 注意參數順序:w,x,y,zR_w2c = q.rotation_matrix # 世界坐標系到相機坐標系的旋轉# 計算相機中心在世界坐標系中的位置: C = -R^T * TT = np.array(translation)C = -R_w2c.T @ T# 構建從相機坐標系到世界坐標系的變換矩陣transformation_matrix = np.eye(4)transformation_matrix[:3, :3] = R_w2c # 旋轉部分transformation_matrix[:3, 3] = C # 平移部分: 相機中心在世界坐標系中的位置return transformation_matrix# 四元數轉旋轉矩陣函數
def quaternion_to_rotation_matrix_apollo(translation, rotation):
    """Convert a (w, x, y, z) quaternion + translation to a 4x4 transform.

    Kept as a separate entry point for the Apollo extrinsics; the
    computation is intentionally identical to quaternion_to_rotation_matrix,
    so it simply delegates to it.
    """
    return quaternion_to_rotation_matrix(translation, rotation)


# Camera ordering shared by every stack of 6 extrinsic matrices below.
cam_names = ["CAM_FRONT", "CAM_FRONT_RIGHT", "CAM_FRONT_LEFT",
             "CAM_BACK", "CAM_BACK_LEFT", "CAM_BACK_RIGHT"]


def gen_bev_kdata_from_nuscenes_extrinsics(extrinsics_data):
    """Write the (6, 4, 4) BEV extrinsic matrices derived from NuScenes data.

    Args:
        extrinsics_data: JSON text of [name, quaternion(w,x,y,z),
            translation(x,y,z)] rows. Output goes to nuscenes_extrinsics.txt
            as a flat Python literal (one matrix row per line).
    """
    cameras = {}
    for entry in json.loads(extrinsics_data):
        cam_name, quat, trans = entry
        cameras[cam_name.strip()] = {"translation": trans, "rotation": quat}
    with open("nuscenes_extrinsics.txt", "w") as f:
        f.write("[\n")
        for cam_name in cam_names:
            cam = cameras[cam_name]
            extrinsic = quaternion_to_rotation_matrix(cam["translation"], cam["rotation"])
            for row in extrinsic:
                print(row)
                f.write(",".join([f"{x:.8e}" for x in row]) + ",\n")
        f.write("]\n")


def gen_bev_kdata_from_apollo_nuscenes_165():
    """Write BEV extrinsic matrices from Apollo's nuscenes_165 YAML files.

    Reads camera_params/<CAM>_extrinsics.yaml for each camera and writes
    the resulting 4x4 matrices to apollo_nuscenes_165.txt.
    """
    print("相機數據 (名稱, 四元數(w,x,y,z), 位置(x,y,z))")
    with open("apollo_nuscenes_165.txt", "w") as f:
        f.write("[\n")
        for cam_name in cam_names:
            path = f"camera_params/{cam_name}_extrinsics.yaml"
            with open(path, 'r', encoding="utf-8") as fi:
                config = yaml.safe_load(fi)
            transform = config['transform']
            rot = transform['rotation']
            trs = transform['translation']
            rotation = [rot['w'], rot['x'], rot['y'], rot['z']]
            trans = [trs['x'], trs['y'], trs['z']]
            print(f"[\"{cam_name:16s}\",[{rotation[0]:>7.4e},{rotation[1]:>7.4e},{rotation[2]:>7.4e},{rotation[3]:>7.4e}],[{trans[0]:>7.4f},{trans[1]:>7.4e},{trans[2]:>7.4e}]],")
            extrinsic = quaternion_to_rotation_matrix(trans, rotation)
            for row in extrinsic:
                f.write(",".join([f"{x:.8e}" for x in row]) + ",\n")
        f.write("]\n")


def main(ext_params, name):
    """Plot camera positions and viewing directions for one extrinsic set.

    Args:
        ext_params: flat array of 6 row-major 4x4 extrinsic matrices.
        name: label used in the figure title.
    """
    ext_params = ext_params.reshape(6, 4, 4)
    fig = plt.figure(figsize=(14, 10))
    ax = fig.add_subplot(111, projection='3d')
    ax.set_title(f'Camera Positions Relative to LiDAR({name})', fontsize=16)
    # The LiDAR sits at the origin of the vehicle frame.
    ax.scatter([0], [0], [0], c='red', s=100, label='LiDAR Origin')
    colors = {"CAM_FRONT": "blue", "CAM_FRONT_RIGHT": "green",
              "CAM_FRONT_LEFT": "cyan", "CAM_BACK": "red",
              "CAM_BACK_LEFT": "magenta", "CAM_BACK_RIGHT": "yellow"}
    for i, matrix in enumerate(ext_params):
        cam_name = cam_names[i]
        rot = matrix[:3, :3]
        # Recover the camera center from the stored translation column.
        cam_pos = -(rot @ matrix[:3, 3])
        # Viewing direction = rotated +Z axis of the camera frame.
        direction = rot @ np.array([0, 0, 1])
        ax.scatter(cam_pos[0], cam_pos[1], cam_pos[2],
                   c=colors[cam_name], s=80, label=cam_name)
        arrow = Arrow3D([cam_pos[0], cam_pos[0] + direction[0] * 0.4],
                        [cam_pos[1], cam_pos[1] + direction[1] * 0.4],
                        [cam_pos[2], cam_pos[2] + direction[2] * 0.4],
                        mutation_scale=15, arrowstyle="-|>",
                        color=colors[cam_name], linewidth=2)
        ax.add_artist(arrow)
        ax.text(cam_pos[0], cam_pos[1], cam_pos[2] + 0.1, cam_name, fontsize=6)
    ax.set_xlabel('X Axis (Front-Back)', fontsize=12)
    ax.set_ylabel('Y Axis (Left-Right)', fontsize=12)
    ax.set_zlabel('Z Axis (Height)', fontsize=12)
    # Fixed, equal range on every axis so the layout is easy to compare.
    max_range = 2
    ax.set_xlim(-max_range, max_range)
    ax.set_ylim(-max_range, max_range)
    ax.set_zlim(-max_range, max_range)
    ax.legend(loc='upper right', fontsize=10)
    ax.grid(True)
    ax.view_init(elev=25, azim=-45)
    plt.tight_layout()
    plt.show()
# Apollo BEV's built-in k_data, copied from
# modules/perception/camera_detection_bev/detector/petr/bev_obstacle_detector.h
# (6 cameras x one row-major 4x4 matrix each, flattened).
apollo_bev_kdata = np.array([
    -1.40307297e-03, 9.07780395e-06, 4.84838307e-01, -5.43047376e-02,
    -1.40780103e-04, 1.25770375e-05, 1.04126692e+00, 7.67668605e-01,
    -1.02884378e-05, -1.41007011e-03, 1.02823459e-01, -3.07415128e-01,
    0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00,
    -9.39000631e-04, -7.65239349e-07, 1.14073277e+00, 4.46270645e-01,
    1.04998052e-03, 1.91798881e-05, 2.06218868e-01, 7.42717385e-01,
    1.48074005e-05, -1.40855671e-03, 7.45946690e-02, -3.16081315e-01,
    0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00,
    -7.0699735e-04, 4.2389297e-07, -5.5183989e-01, -5.3276348e-01,
    -1.2281288e-03, 2.5626015e-05, 1.0212017e+00, 6.1102939e-01,
    -2.2421273e-05, -1.4170362e-03, 9.3639769e-02, -3.0863306e-01,
    0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00,
    2.2227580e-03, 2.5312484e-06, -9.7261822e-01, 9.0684637e-02,
    1.9360810e-04, 2.1347081e-05, -1.0779887e+00, -7.9227984e-01,
    4.3742721e-06, -2.2310747e-03, 1.0842450e-01, -2.9406491e-01,
    0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00,
    5.97175560e-04, -5.88774265e-06, -1.15893924e+00, -4.49921310e-01,
    -1.28312141e-03, 3.58297058e-07, 1.48300052e-01, 1.14334166e-01,
    -2.80917516e-06, -1.41527120e-03, 8.37693438e-02, -2.36765608e-01,
    0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00,
    3.6048229e-04, 3.8333174e-06, 7.9871160e-01, 4.3321830e-01,
    1.3671946e-03, 6.7484652e-06, -8.4722507e-01, 1.9411178e-01,
    7.5027779e-06, -1.4139183e-03, 8.2083985e-02, -2.4505949e-01,
    0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00,
])

# NuScenes extrinsics dumped by get_nuscenes_extrinsics.py above,
# parsed below with json.loads (keep it valid JSON).
nuscenes_extrinsics_data = """
[
["CAM_FRONT       ",[4.9980e-01,-5.0303e-01,4.9978e-01,-4.9737e-01],[ 1.7008,1.5946e-02,1.5110e+00]],
["CAM_FRONT_RIGHT ",[2.0603e-01,-2.0269e-01,6.8245e-01,-6.7136e-01],[ 1.5508,-4.9340e-01,1.4957e+00]],
["CAM_BACK_RIGHT  ",[1.2281e-01,-1.3240e-01,-7.0043e-01,6.9050e-01],[ 1.0149,-4.8057e-01,1.5624e+00]],
["CAM_BACK        ",[5.0379e-01,-4.9740e-01,-4.9419e-01,5.0455e-01],[ 0.0283,3.4514e-03,1.5791e+00]],
["CAM_BACK_LEFT   ",[6.9242e-01,-7.0316e-01,-1.1648e-01,1.1203e-01],[ 1.0357,4.8480e-01,1.5910e+00]],
["CAM_FRONT_LEFT  ",[6.7573e-01,-6.7363e-01,2.1214e-01,-2.1123e-01],[ 1.5239,4.9463e-01,1.5093e+00]]
]
"""
# Generate the two k_data text files, then visualize all three extrinsic sets.
gen_bev_kdata_from_nuscenes_extrinsics(nuscenes_extrinsics_data)
with open("nuscenes_extrinsics.txt", "r") as f:
    # NOTE(review): eval() on a file this script just wrote — acceptable
    # here, but never use eval on untrusted input.
    nuscenes_bev_kdata = np.array(eval(f.read()))
gen_bev_kdata_from_apollo_nuscenes_165()
with open("apollo_nuscenes_165.txt", "r") as f:
    apollo_nuscenes_kdata = np.array(eval(f.read()))
main(apollo_bev_kdata, "apollo_bev_kdata")
main(nuscenes_bev_kdata, "nuscenes_bev_kdata")
main(apollo_nuscenes_kdata, "apollo_nuscenes_kdata")
EOF
\cp /opt/apollo/neo/share/modules/calibration/data/nuscenes_165/camera_params ./ -rf
python infer_camera_pos_by_extrinsics.py
數學原理:
- 四元數 → 旋轉矩陣:使用 pyquaternion 庫轉換
- 相機位置計算:$C_{world} = -R^T \cdot T$
- 最終得到4x4變換矩陣(包含旋轉和平移)
可視化要素:
- 坐標系:X(前/后), Y(左/右), Z(高/低)
- 激光雷達:原點紅色標記
- 相機位置:不同顏色表示不同視角
- 相機朝向:3D箭頭指示拍攝方向
四、可視化對比
參考圖片
1. NuScenes 數據集外參
- 特點:
- 車輛朝向:標準前向(Y軸正方向)
- 相機布局:六相機均勻分布
- 位置對稱性:左右相機位置精確對稱
2. Apollo BEV模型外參
- 特點:
- 車輛朝向:非標準方向(約15度偏轉)
- 相機視角:六相機均勻分布
3. Apollo園區版外參
- 特點:
- 位置正確:相機位置符合車輛布局
- 車輛朝向:朝向X軸,不合理,應該是Y軸
- 朝向錯誤:所有相機均朝向前方(應為各方向)
- 問題原因:可能是標定時未設置正確方向
- 實際影響:導致側面和后方視角失效
相機數據 (名稱, 四元數(w,x,y,z), 位置(x,y,z))
["CAM_FRONT ",[7.0511e-01,-1.7317e-03,-7.0910e-01,2.2896e-03],[-0.0159,1.7008e+00,1.5110e+00]],
["CAM_FRONT_RIGHT ",[6.1737e-01,3.3363e-01,-6.2890e-01,-3.3472e-01],[ 0.4934,1.5508e+00,1.4957e+00]],
["CAM_FRONT_LEFT ",[6.2786e-01,-3.2765e-01,-6.2564e-01,3.2712e-01],[-0.4946,1.5239e+00,1.5093e+00]],
["CAM_BACK ",[2.2658e-03,-7.0116e-01,5.7708e-04,7.1300e-01],[-0.0035,2.8326e-02,1.5791e+00]],
["CAM_BACK_LEFT ",[4.0822e-01,-5.7804e-01,-4.1698e-01,5.7040e-01],[-0.4848,1.0357e+00,1.5910e+00]],
["CAM_BACK_RIGHT ",[3.9507e-01,5.8460e-01,-4.0790e-01,-5.7947e-01],[ 0.4806,1.0149e+00,1.5624e+00]],
五、關鍵結論與應用
-
標定質量驗證:
- 理想情況:相機位置對稱分布,高度一致(如NuScenes數據)
- 危險信號:位置不對稱、高度不一致、朝向錯誤
-
錯誤檢測:
- Apollo園區版外參存在嚴重朝向錯誤
- 通過可視化可快速發現此類基礎錯誤
通過這種可視化方法,即使非專業人員也能直觀理解相機空間關系,快速發現標定中的重大錯誤,顯著提高自動駕駛系統的可靠性。