本文已参与「新人创作礼」活动,一起开启掘金创作之路。
1、用于测试的数据,已经上传到CSDN上:
2、代码
'''
项目名称:
项目内容:
参考来源:
https://blog.csdn.net/qq_36187544/article/details/102626629
https://blog.csdn.net/weixin_30686845/article/details/99054389
https://blog.csdn.net/originalcandy/article/details/84834991
'''
__Author__ = "Shliang"
__Email__ = "shliang0603@gmail.com"
import cv2
import numpy as np
from pyquaternion import Quaternion
# Extrinsic translation from the LiDAR frame to the camera frame
# (presumably metres — TODO confirm against the calibration source).
translation_vector = np.array([0.06, -0.08, 0.196])
# Per-axis extrinsic rotations, given in degrees.
rotation_x = Quaternion(axis=(1.0, 0.0, 0.0), degrees=-3.074572889)
rotation_y = Quaternion(axis=(0.0, 1.0, 0.0), degrees=-71.973162002)
rotation_z = Quaternion(axis=(0.0, 0.0, 1.0), degrees=93.232960952)
# Compose the rotations in Z * Y * X order into one 3x3 rotation matrix.
Tcl = np.dot(rotation_z.rotation_matrix, rotation_y.rotation_matrix)
Tcl = np.dot(Tcl, rotation_x.rotation_matrix)
# Convert back to a quaternion to obtain the axis-angle (Rodrigues)
# rotation vector that cv2.projectPoints expects.
qcl = Quaternion(matrix=Tcl)
print("qcl: ", qcl)
print("qcl.axis(): ", qcl.axis)
rotation_vector = qcl.angle * qcl.axis
print("Tcl", Tcl)
print("rotation_vector", rotation_vector)
# 3x3 camera intrinsic matrix (fx, fy focal lengths; cx, cy principal point).
camera_matrix = np.array([[685.64675, 0. , 649.10791],
[0. , 676.65803, 338.05443],
[0. , 0. , 1. ]])
# Lens distortion coefficients (k1, k2, p1, p2, k3).
dist_coeffs = np.array([-0.363219, 0.093818, 0.006178, -0.003714, 0.000000])
# Remove lens distortion from a camera frame.
def undistort(frame):
    """Return *frame* with lens distortion corrected.

    The intrinsic matrix and distortion coefficients are hard-coded
    for the specific camera this calibration was made for.
    """
    # Camera-to-pixel projection matrix (intrinsics).
    intrinsics = np.array([
        [685.646752, 0, 649.107905],
        [0, 676.658033, 338.054431],
        [0, 0, 1],
    ])
    # Distortion coefficients: k1, k2, p1, p2, k3.
    distortion = np.array([-0.363219, 0.093818, 0.006178, -0.003714, 0.0])
    height, width = frame.shape[:2]
    map_x, map_y = cv2.initUndistortRectifyMap(
        intrinsics, distortion, None, intrinsics, (width, height), 5)
    return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR)
def read_pointcloud(pointcloud_file):
    """Load an ASCII point cloud file into an (N, 3) float array.

    Each non-blank line must contain three whitespace-separated
    numbers: x y z.

    Parameters:
        pointcloud_file: path to the text file to read.

    Returns:
        numpy.ndarray of shape (N, 3), dtype float64.
    """
    with open(pointcloud_file) as f:
        # Skip blank lines (e.g. a stray trailing newline) so a padded
        # file no longer crashes the three-way unpack below.
        rows = [line.split() for line in f if line.strip()]
    # Build the whole array at once instead of filling a zeros buffer
    # row by row; reshape(-1, 3) keeps an empty file as shape (0, 3).
    data = np.array([[float(x), float(y), float(z)] for x, y, z in rows],
                    dtype=np.float64).reshape(-1, 3)
    print(data.shape)
    return data
def read_image(image_path):
    """Load the image at *image_path* with OpenCV and return it."""
    return cv2.imread(image_path)
def draw_points(points, image_path="./1.png"):
    """Draw projected 2D points onto an image and display it.

    Parameters:
        points: iterable of (x, y) pixel coordinates (e.g. the output
            of cv2.projectPoints after squeezing).
        image_path: image file to draw on; defaults to the sample
            frame used in the original article, so existing callers
            are unaffected.
    """
    image = read_image(image_path)
    # Undistorting BEFORE drawing makes the cloud misalign with the
    # picture, because the corrected image is cropped; keep disabled.
    # image = undistort(image)
    # Derive the bounds from the actual image instead of hard-coding
    # 1280x720, so other resolutions work too.
    height, width = image.shape[:2]
    for x, y in points:
        x, y = int(x), int(y)
        # Drop projected points that fall outside the visible frame.
        if 0 < x < width and 0 < y < height:
            print(x, y)
            image = cv2.circle(image, (x, y), 1, (0, 255, 0), 1)
    # Undistorting AFTER drawing would crop points and pixels together,
    # keeping them aligned — also reasonable, left disabled here.
    # undistort(image)
    cv2.imshow("image", image)
    cv2.waitKey(0)
def main():
    """Project the LiDAR point cloud onto the camera image and show it."""
    # Load the 3D LiDAR point cloud from disk.
    points3D = read_pointcloud('./1.txt')
    print(f"./point3D shape: {points3D.shape}")
    # Project the 3D points into the pixel plane using the extrinsic
    # rotation/translation and the camera intrinsics + distortion.
    projected = cv2.projectPoints(points3D, rotation_vector,
                                  translation_vector, camera_matrix,
                                  dist_coeffs)[0]
    points2D_reproj = projected.squeeze(1)
    print(points2D_reproj)
    print(f"points2D_reproj shape: {points2D_reproj.shape}, length : {len(points2D_reproj)}")
    # Render the projected pixel coordinates onto the image.
    draw_points(points2D_reproj)


if __name__ == '__main__':
    main()
- 没有做畸变矫正的映射结果
- 在没有绘制映射点之前就做了畸变矫正,这种显然不对,从右边的椅子就可以看出:因为画面提前被裁剪了,但是分辨率又保持不变,这样在裁剪掉信息之后再做投影,显然越靠近边缘的点就对不上了!
- 在绘制点之后,再对图像做畸变矫正,即使画面信息被裁剪了,点云映射也是能和画面对得上的!