Using RealSense, Part 1: Connecting Two Cameras at Once


Connect a RealSense D435i and a RealSense T265 to the same computer at the same time, read the D435i's IMU data, RGB image, and depth image, and read the T265's position.

Connecting with pyrealsense2

To connect with pyrealsense2, you only need to install the RealSense SDK and the pyrealsense2 package. In my test this approach failed, possibly due to a bug in the SDK; the official answer was that the issue would be followed up later. The code is as follows:

import pyrealsense2 as rs
import numpy as np
import os

# NOTE: this script does not work for me; suspected SDK bug (see above)

ctx = rs.context()
devices = ctx.query_devices()
align = rs.align(rs.stream.color)
depth_scale = None
pipelines = {}
for dev in devices:
    # get the name
    dev_name = dev.get_info(rs.camera_info.name)
    if "D435I" in dev_name:
        pipeline = rs.pipeline(ctx)
        cfg = rs.config()
        cfg.enable_device(dev.get_info(rs.camera_info.serial_number))
        print(dev.get_info(rs.camera_info.serial_number))
        cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        cfg.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        profile = pipeline.start(cfg)
        depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
        pipelines["D435I"] = pipeline
    elif "T265" in dev_name:
        pipeline = rs.pipeline(ctx)
        cfg = rs.config()
        cfg.enable_device(dev.get_info(rs.camera_info.serial_number))
        print(dev.get_info(rs.camera_info.serial_number))
        cfg.enable_stream(rs.stream.pose)
        profile = pipeline.start(cfg)
        pipelines["T265"] = pipeline
    else:
        print("detect extra camera")

try:
    while True:
        # D435i info
        frames = pipelines["D435I"].wait_for_frames()
        aligned_frames = align.process(frames)
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        if not aligned_depth_frame or not color_frame:
            continue

        # get the distance to image center
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
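        # scale the raw z16 values to millimetres (depth_scale converts raw units to metres)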
        depth_image = depth_image * depth_scale * 1000
        width = aligned_depth_frame.get_width()
        height = aligned_depth_frame.get_height()
        dist_to_center = aligned_depth_frame.get_distance(int(width / 2), int(height / 2))

        # T265 info
        frames = pipelines["T265"].wait_for_frames()
        pose = frames.get_pose_frame()

        if pose and dist_to_center:
            data = pose.get_pose_data()
            if pose.frame_number % 100 == 0:
                print("Frame #{}".format(pose.frame_number))
                print("Position: {}".format(data.translation))
                print("Distance: {}".format(dist_to_center))
finally:
    for name in pipelines:
        pipelines[name].stop()
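
If the script above fails, it can help to first confirm that both cameras are visible to the SDK at all. The short check below reuses only the pyrealsense2 calls already used above and prints every detected device with its serial number:

import pyrealsense2 as rs

# list every connected RealSense device with its name and serial number
ctx = rs.context()
for dev in ctx.query_devices():
    print(dev.get_info(rs.camera_info.name), dev.get_info(rs.camera_info.serial_number))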


Connecting with rospy

Connecting through ROS requires ROS, the RealSense SDK, and rospy. After installing ROS, use roslaunch to bring up the two cameras, then read the data with rospy; see the realsense-ros reference link below for installation and usage. Once everything is installed, modify the launch files: create rs_d400_and_t265_v2.launch under realsense2_camera/launch.

<launch>
  <arg name="device_type_camera1"    		default="t265"/>
  <arg name="device_type_camera2"    		default="d4.5"/>		<!-- Note: using regular expression. match D435, D435i, D415... -->
  <arg name="serial_no_camera1"    			default=""/>
  <arg name="serial_no_camera2"    			default=""/>
  <arg name="camera1"              			default="t265"/>
  <arg name="camera2"              			default="d400"/>
  <arg name="tf_prefix_camera1"         default="$(arg camera1)"/>
  <arg name="tf_prefix_camera2"         default="$(arg camera2)"/>
  <arg name="initial_reset"             default="false"/>
  <arg name="enable_fisheye"            default="false"/>
  <arg name="color_width"               default="640"/>
  <arg name="color_height"              default="480"/>
  <arg name="depth_width"               default="640"/>
  <arg name="depth_height"              default="480"/>
  <arg name="clip_distance"             default="-2"/>
  <arg name="topic_odom_in"             default="odom_in"/>
  <arg name="calib_odom_file"           default=""/>
  <arg name="enable_gyro"				default="true"/>
  <arg name="enable_accel"				default="true"/>

  <group ns="$(arg camera1)">
    <include file="$(find realsense2_camera)/launch/includes/nodelet.launch.xml">
      <arg name="device_type"           value="$(arg device_type_camera1)"/>
      <arg name="serial_no"             value="$(arg serial_no_camera1)"/>
      <arg name="tf_prefix"         		value="$(arg tf_prefix_camera1)"/>
      <arg name="initial_reset"         value="$(arg initial_reset)"/>
      <arg name="enable_fisheye1"       value="$(arg enable_fisheye)"/>
      <arg name="enable_fisheye2"       value="$(arg enable_fisheye)"/>
      <arg name="topic_odom_in"         value="$(arg topic_odom_in)"/>
      <arg name="calib_odom_file"       value="$(arg calib_odom_file)"/>
      <arg name="enable_pose"           value="true"/>
    </include>
  </group>

  <group ns="$(arg camera2)">
    <include file="$(find realsense2_camera)/launch/includes/nodelet.launch.xml">
      <arg name="device_type"           value="$(arg device_type_camera2)"/>
      <arg name="serial_no"             value="$(arg serial_no_camera2)"/>
      <arg name="tf_prefix"		          value="$(arg tf_prefix_camera2)"/>
      <arg name="initial_reset"         value="$(arg initial_reset)"/>
      <arg name="align_depth"           value="true"/>
      <arg name="filters"               value="pointcloud"/>
      <arg name="color_width"           value="$(arg color_width)"/>
      <arg name="color_height"          value="$(arg color_height)"/>
      <arg name="depth_width"           value="$(arg depth_width)"/>
      <arg name="depth_height"          value="$(arg depth_height)"/>
      <arg name="clip_distance"         value="$(arg clip_distance)"/>
	  <arg name="enable_gyro"			value="$(arg enable_gyro)"/>
	  <arg name="enable_accel"			value="$(arg enable_accel)"/>
    </include>
  </group>
  <node pkg="tf" type="static_transform_publisher" name="t265_to_d400" args="0 0 0 0 0 0 /$(arg tf_prefix_camera1)_link /$(arg tf_prefix_camera2)_link 100"/>
</launch>
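
The static_transform_publisher node at the end publishes a fixed (here all-zero) transform between the T265 and D435i link frames every 100 ms; if the two cameras are rigidly mounted with a known offset, replace the six zeros with the measured x y z yaw pitch roll extrinsics.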

Launch the nodes

roslaunch realsense2_camera rs_d400_and_t265_v2.launch
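
If more than these two cameras are plugged in, the serial numbers can be passed explicitly as roslaunch arguments (replace the placeholders below with the serials printed by the pyrealsense2 snippet earlier):

roslaunch realsense2_camera rs_d400_and_t265_v2.launch serial_no_camera1:=<T265-serial> serial_no_camera2:=<D435i-serial>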

After the launch succeeds, check the published topics. The ones we need are:

  • /d400/color/image_raw
  • /d400/aligned_depth_to_color/image_raw
  • /d400/gyro/sample
  • /d400/accel/sample
  • /t265/odom/sample
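
Before wiring everything into a single callback, each topic can be checked on its own with a plain subscriber. A minimal sketch for the T265 odometry topic (the node name t265_odom_check is just a placeholder):

import rospy
from nav_msgs.msg import Odometry

def on_odom(msg):
    # print the current T265 position estimate
    print(msg.pose.pose.position)

rospy.init_node("t265_odom_check", anonymous=True)
rospy.Subscriber("/t265/odom/sample", Odometry, on_odom)
rospy.spin()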

Subscribe to the topics above and use ApproximateTimeSynchronizer to align them in time; the code is as follows:

import sys
import time
import os
import rospy
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import Imu as msg_Imu
from nav_msgs.msg import Odometry as msg_Odometry
import numpy as np
from cv_bridge import CvBridge, CvBridgeError
import argparse
import message_filters
import cv2
import matplotlib.pyplot as plt


class CWaitForMessage:
    def __init__(self, opt):
        self.result = None
        self.timeout = opt.timeout
        self.seq = opt.seq
        self.node_name = "rs2_listener"
        self.bridge = CvBridge()
        self.listener = None
        self.prev_msg_time = 0
        self.fout = None
        self.func_data = dict()

    def callback(self, img_data, depth_data, gyro_data, accel_data, pos_data):
        cv_color_img = self.bridge.imgmsg_to_cv2(img_data, img_data.encoding)
        cv2.imshow("RGB", cv2.cvtColor(cv_color_img, cv2.COLOR_RGB2BGR))
        cv_depth_img = self.bridge.imgmsg_to_cv2(depth_data, depth_data.encoding)
        cv2.imshow("depth", cv_depth_img)
        key = cv2.waitKey(1)
        # Press esc or "q" to close the image window
        if (key & 0xFF) == ord("q") or key == 27:
            cv2.destroyAllWindows()
            os._exit(0)
        print("gyro angular_velocity")
        print(gyro_data.angular_velocity)
        print("accel linear_acceleration")
        print(accel_data.linear_acceleration)
        print("pos")
        print(pos_data.pose)

    def wait_for_messages(self):
        print("connect to ROS with name: %s" % self.node_name)
        rospy.init_node(self.node_name, anonymous=True)
        img_sub = message_filters.Subscriber("/d400/color/image_raw", msg_Image)
        depth_sub = message_filters.Subscriber("/d400/aligned_depth_to_color/image_raw", msg_Image)
        gyro_sub = message_filters.Subscriber("/d400/gyro/sample", msg_Imu)
        accel_sub = message_filters.Subscriber("/d400/accel/sample", msg_Imu)
        pos_sub = message_filters.Subscriber("/t265/odom/sample", msg_Odometry)
        # topic synchronization by time
        ts = message_filters.ApproximateTimeSynchronizer([img_sub, depth_sub, gyro_sub, accel_sub, pos_sub],
                                                         queue_size=10, slop=0.5, allow_headerless=True)
        ts.registerCallback(self.callback)
        rospy.spin()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--wanted_topic", type=str, default="colorStream", help="the topic to listen")
    parser.add_argument("--seq", type=str, default="", help="the sequential number for device")
    parser.add_argument("--timeout", type=float, default=1, help="timeout value")
    opt = parser.parse_args()

    msg_retriever = CWaitForMessage(opt)
    msg_retriever.wait_for_messages()

The code above is written for Python 2; because the prebuilt cv_bridge packages do not support Python 3, running it under Python 3 may cause problems.
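
If cv_bridge is the only obstacle under Python 3, one common workaround is to decode the sensor_msgs/Image buffer with numpy directly. A minimal sketch, assuming the rgb8/bgr8 color and 16UC1 depth encodings used above and rows without extra step padding:

import numpy as np

def imgmsg_to_numpy(msg):
    # convert a sensor_msgs/Image to a numpy array without cv_bridge
    if msg.encoding == "16UC1":             # depth image
        dtype, channels = np.uint16, 1
    elif msg.encoding in ("rgb8", "bgr8"):  # color image
        dtype, channels = np.uint8, 3
    else:
        raise ValueError("unsupported encoding: %s" % msg.encoding)
    data = np.frombuffer(msg.data, dtype=dtype)
    return data.reshape(msg.height, msg.width, channels).squeeze()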

References

realsense-ros installation