运行msckf_vio

article/2025/9/17 2:08:05

1、编译

cd ~/msckf
catkin_make --pkg msckf_vio --cmake-args -DCMAKE_BUILD_TYPE=Release

2、运行(运行euroc数据集)
首先roscore开启ros节点

cd ~/msckf
source ~/msckf/devel/setup.bash
roslaunch msckf_vio msckf_vio_euroc.launch
cd ~/msckf
source ~/msckf/devel/setup.bash
rosrun rviz rviz -d /home/dyt/msckf/src/msckf_vio/rviz/rviz_euroc_config.rviz
cd ~/msckf
source ~/msckf/devel/setup.bash
rosbag play /home/dyt/DYT/compare/euroc6/MH_05/data.bag

3、运行(运行tum数据集)
首先roscore开启ros节点

cd ~/msckf
source ~/msckf/devel/setup.bash
roslaunch msckf_vio msckf_vio_tum.launch
cd ~/msckf
source ~/msckf/devel/setup.bash
rosrun rviz rviz -d /home/dyt/msckf/src/msckf_vio/rviz/rviz_tum_config.rviz
cd ~/msckf
source ~/msckf/devel/setup.bash
rosbag play /home/dyt/DYT/compare/tum/room1/data.bag

4、运行(运行kitti数据集)
首先roscore开启ros节点

cd ~/msckf
source ~/msckf/devel/setup.bash
roslaunch msckf_vio msckf_vio_kitti.launch
cd ~/msckf
source ~/msckf/devel/setup.bash
rosrun rviz rviz -d /home/dyt/msckf/src/msckf_vio/rviz/rviz_kitti_config.rviz
cd ~/msckf
source ~/msckf/devel/setup.bash
rosbag play /home/dyt/compare11/kitti/kitti2bag-master/kitti2bag/kitti_2011_10_03_drive_0042_synced.bag

运行KITTI数据集:

1、首先需要处理kitti数据集,使用kitti2bag工具,运行Python指令:

python kitti2bag1.py raw_synced /home/dyt/ -t 2011_10_03 -r 0042

代码如下:

#!env python
# -*- coding: utf-8 -*-
"""kitti2bag: convert a KITTI dataset (raw or odometry) into a ROS bag.

This header guards the pykitti import so users get an actionable message
instead of a bare traceback when the dependency is missing.
"""

import sys

try:
    import pykitti
except ImportError as e:
    # pykitti is the only non-ROS third-party dependency; fail fast with
    # an install hint rather than a raw ImportError traceback.
    print('Could not load module \'pykitti\'. Please run `pip install pykitti`')
    sys.exit(1)

import tf
import os
import cv2
import rospy
import rosbag
import progressbar
from tf2_msgs.msg import TFMessage
from datetime import datetime
from std_msgs.msg import Header
from sensor_msgs.msg import CameraInfo, Imu, PointField, NavSatFix
import sensor_msgs.point_cloud2 as pcl2
from geometry_msgs.msg import TransformStamped, TwistStamped, Transform
from cv_bridge import CvBridge
import numpy as np
import argparse


def save_imu_data(bag, kitti, imu_frame_id, topic):
    """Write one sensor_msgs/Imu message per OXTS packet to *topic*.

    Orientation comes from the OXTS roll/pitch/yaw; linear acceleration and
    angular velocity are copied straight from the packet.
    """
    print("Exporting IMU")
    for timestamp, oxts in zip(kitti.timestamps, kitti.oxts):
        q = tf.transformations.quaternion_from_euler(oxts.packet.roll, oxts.packet.pitch, oxts.packet.yaw)
        imu = Imu()
        imu.header.frame_id = imu_frame_id
        # NOTE(review): "%s" in strftime is a POSIX extension (epoch seconds);
        # this only works on platforms whose C library supports it — confirm.
        imu.header.stamp = rospy.Time.from_sec(float(timestamp.strftime("%s.%f")))
        imu.orientation.x = q[0]
        imu.orientation.y = q[1]
        imu.orientation.z = q[2]
        imu.orientation.w = q[3]
        imu.linear_acceleration.x = oxts.packet.ax
        imu.linear_acceleration.y = oxts.packet.ay
        imu.linear_acceleration.z = oxts.packet.az
        imu.angular_velocity.x = oxts.packet.wx
        imu.angular_velocity.y = oxts.packet.wy
        imu.angular_velocity.z = oxts.packet.wz
        bag.write(topic, imu, t=imu.header.stamp)


def save_dynamic_tf(bag, kitti, kitti_type, initial_time):
    """Write the time-varying world->vehicle transform to /tf.

    raw datasets: world -> base_link from the OXTS pose (T_w_imu).
    odom datasets: world -> camera_left from the ground-truth T_w_cam0,
    stamped relative to *initial_time* (epoch seconds).
    """
    print("Exporting time dependent transformations")
    if kitti_type.find("raw") != -1:
        for timestamp, oxts in zip(kitti.timestamps, kitti.oxts):
            tf_oxts_msg = TFMessage()
            tf_oxts_transform = TransformStamped()
            tf_oxts_transform.header.stamp = rospy.Time.from_sec(float(timestamp.strftime("%s.%f")))
            tf_oxts_transform.header.frame_id = 'world'
            tf_oxts_transform.child_frame_id = 'base_link'
            transform = (oxts.T_w_imu)
            t = transform[0:3, 3]
            q = tf.transformations.quaternion_from_matrix(transform)
            oxts_tf = Transform()
            oxts_tf.translation.x = t[0]
            oxts_tf.translation.y = t[1]
            oxts_tf.translation.z = t[2]
            oxts_tf.rotation.x = q[0]
            oxts_tf.rotation.y = q[1]
            oxts_tf.rotation.z = q[2]
            oxts_tf.rotation.w = q[3]
            tf_oxts_transform.transform = oxts_tf
            tf_oxts_msg.transforms.append(tf_oxts_transform)
            bag.write('/tf', tf_oxts_msg, tf_oxts_msg.transforms[0].header.stamp)
    elif kitti_type.find("odom") != -1:
        # Odometry timestamps are timedeltas; shift them onto the wall clock.
        timestamps = map(lambda x: initial_time + x.total_seconds(), kitti.timestamps)
        for timestamp, tf_matrix in zip(timestamps, kitti.T_w_cam0):
            tf_msg = TFMessage()
            tf_stamped = TransformStamped()
            tf_stamped.header.stamp = rospy.Time.from_sec(timestamp)
            tf_stamped.header.frame_id = 'world'
            tf_stamped.child_frame_id = 'camera_left'
            t = tf_matrix[0:3, 3]
            q = tf.transformations.quaternion_from_matrix(tf_matrix)
            transform = Transform()
            transform.translation.x = t[0]
            transform.translation.y = t[1]
            transform.translation.z = t[2]
            transform.rotation.x = q[0]
            transform.rotation.y = q[1]
            transform.rotation.z = q[2]
            transform.rotation.w = q[3]
            tf_stamped.transform = transform
            tf_msg.transforms.append(tf_stamped)
            bag.write('/tf', tf_msg, tf_msg.transforms[0].header.stamp)


def save_camera_data(bag, kitti_type, kitti, util, bridge, camera, camera_frame_id, topic, initial_time):
    """Export one camera's images (and CameraInfo) to the bag.

    raw datasets publish <topic>/image_raw with full plumb_bob calibration;
    odom datasets publish <topic>/image_rect with only the projection matrix.
    Gray cameras (0, 1) are converted to mono8; others stay bgr8.
    """
    print("Exporting camera {}".format(camera))
    if kitti_type.find("raw") != -1:
        camera_pad = '{0:02d}'.format(camera)
        image_dir = os.path.join(kitti.data_path, 'image_{}'.format(camera_pad))
        image_path = os.path.join(image_dir, 'data')
        image_filenames = sorted(os.listdir(image_path))
        with open(os.path.join(image_dir, 'timestamps.txt')) as f:
            # [:-4] strips the sub-microsecond digits KITTI appends.
            image_datetimes = map(lambda x: datetime.strptime(x[:-4], '%Y-%m-%d %H:%M:%S.%f'), f.readlines())
        calib = CameraInfo()
        calib.header.frame_id = camera_frame_id
        calib.width, calib.height = tuple(util['S_rect_{}'.format(camera_pad)].tolist())
        calib.distortion_model = 'plumb_bob'
        calib.K = util['K_{}'.format(camera_pad)]
        calib.R = util['R_rect_{}'.format(camera_pad)]
        calib.D = util['D_{}'.format(camera_pad)]
        calib.P = util['P_rect_{}'.format(camera_pad)]
    elif kitti_type.find("odom") != -1:
        camera_pad = '{0:01d}'.format(camera)
        image_path = os.path.join(kitti.sequence_path, 'image_{}'.format(camera_pad))
        image_filenames = sorted(os.listdir(image_path))
        image_datetimes = map(lambda x: initial_time + x.total_seconds(), kitti.timestamps)
        calib = CameraInfo()
        calib.header.frame_id = camera_frame_id
        calib.P = util['P{}'.format(camera_pad)]
    iterable = zip(image_datetimes, image_filenames)
    bar = progressbar.ProgressBar()
    for dt, filename in bar(iterable):
        image_filename = os.path.join(image_path, filename)
        cv_image = cv2.imread(image_filename)
        calib.height, calib.width = cv_image.shape[:2]
        if camera in (0, 1):
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        encoding = "mono8" if camera in (0, 1) else "bgr8"
        image_message = bridge.cv2_to_imgmsg(cv_image, encoding=encoding)
        image_message.header.frame_id = camera_frame_id
        if kitti_type.find("raw") != -1:
            image_message.header.stamp = rospy.Time.from_sec(float(datetime.strftime(dt, "%s.%f")))
            topic_ext = "/image_raw"
        elif kitti_type.find("odom") != -1:
            image_message.header.stamp = rospy.Time.from_sec(dt)
            topic_ext = "/image_rect"
        calib.header.stamp = image_message.header.stamp
        bag.write(topic + topic_ext, image_message, t=image_message.header.stamp)
        bag.write(topic + '/camera_info', calib, t=calib.header.stamp)


def get_static_transform(from_frame_id, to_frame_id, transform):
    """Build a geometry_msgs/TransformStamped from a 4x4 homogeneous matrix."""
    t = transform[0:3, 3]
    q = tf.transformations.quaternion_from_matrix(transform)
    tf_msg = TransformStamped()
    tf_msg.header.frame_id = from_frame_id
    tf_msg.child_frame_id = to_frame_id
    tf_msg.transform.translation.x = float(t[0])
    tf_msg.transform.translation.y = float(t[1])
    tf_msg.transform.translation.z = float(t[2])
    tf_msg.transform.rotation.x = float(q[0])
    tf_msg.transform.rotation.y = float(q[1])
    tf_msg.transform.rotation.z = float(q[2])
    tf_msg.transform.rotation.w = float(q[3])
    return tf_msg


def inv(transform):
    """Invert a rigid body transformation matrix.

    For T = [R t; 0 1] the inverse is [R^T  -R^T t; 0 1], which avoids a
    general 4x4 matrix inversion.
    """
    R = transform[0:3, 0:3]
    t = transform[0:3, 3]
    t_inv = -1 * R.T.dot(t)
    transform_inv = np.eye(4)
    transform_inv[0:3, 0:3] = R.T
    transform_inv[0:3, 3] = t_inv
    return transform_inv


def save_static_transforms(bag, transforms, timestamps):
    """Write the fixed sensor-rig transforms to /tf_static at every timestamp.

    *transforms* is a list of (parent_frame, child_frame, 4x4 matrix) tuples.
    """
    print("Exporting static transformations")
    tfm = TFMessage()
    for transform in transforms:
        t = get_static_transform(from_frame_id=transform[0], to_frame_id=transform[1], transform=transform[2])
        tfm.transforms.append(t)
    for timestamp in timestamps:
        time = rospy.Time.from_sec(float(timestamp.strftime("%s.%f")))
        for i in range(len(tfm.transforms)):
            tfm.transforms[i].header.stamp = time
        bag.write('/tf_static', tfm, t=time)


def run_kitti2bag():
    """Command-line entry point: parse arguments and export the chosen dataset.

    Supports `raw_synced` (needs -t/--date and -r/--drive) and
    `odom_gray`/`odom_color` (needs -s/--sequence). The bag is always closed
    and summarized, even when an export step fails.
    """
    parser = argparse.ArgumentParser(description="Convert KITTI dataset to ROS bag file the easy way!")
    # Accepted argument values
    kitti_types = ["raw_synced", "odom_color", "odom_gray"]
    odometry_sequences = []
    for s in range(22):
        odometry_sequences.append(str(s).zfill(2))
    parser.add_argument("kitti_type", choices=kitti_types, help="KITTI dataset type")
    parser.add_argument("dir", nargs="?", default=os.getcwd(), help="base directory of the dataset, if no directory passed the deafult is current working directory")
    parser.add_argument("-t", "--date", help="date of the raw dataset (i.e. 2011_09_26), option is only for RAW datasets.")
    parser.add_argument("-r", "--drive", help="drive number of the raw dataset (i.e. 0001), option is only for RAW datasets.")
    parser.add_argument("-s", "--sequence", choices=odometry_sequences, help="sequence of the odometry dataset (between 00 - 21), option is only for ODOMETRY datasets.")
    args = parser.parse_args()
    bridge = CvBridge()
    compression = rosbag.Compression.NONE
    # compression = rosbag.Compression.BZ2
    # compression = rosbag.Compression.LZ4
    # CAMERAS: (index, frame_id, topic) — this variant exports the two gray
    # cameras only, so odom_color falls back to the same pair (cameras[-2:]).
    cameras = [(0, 'camera_gray_left', '/kitti/camera_gray_left'),
               (1, 'camera_gray_right', '/kitti/camera_gray_right')]
    if args.kitti_type.find("raw") != -1:
        if args.date is None:
            print("Date option is not given. It is mandatory for raw dataset.")
            print("Usage for raw dataset: kitti2bag raw_synced [dir] -t <date> -r <drive>")
            sys.exit(1)
        elif args.drive is None:
            print("Drive option is not given. It is mandatory for raw dataset.")
            print("Usage for raw dataset: kitti2bag raw_synced [dir] -t <date> -r <drive>")
            sys.exit(1)
        bag = rosbag.Bag("kitti_{}_drive_{}_{}.bag".format(args.date, args.drive, args.kitti_type[4:]), 'w', compression=compression)
        kitti = pykitti.raw(args.dir, args.date, args.drive)
        if not os.path.exists(kitti.data_path):
            print('Path {} does not exists. Exiting.'.format(kitti.data_path))
            sys.exit(1)
        if len(kitti.timestamps) == 0:
            print('Dataset is empty? Exiting.')
            sys.exit(1)
        try:
            # IMU
            imu_frame_id = 'imu_link'
            imu_topic = '/kitti/oxts/imu'
            T_base_link_to_imu = np.eye(4, 4)
            # Fixed IMU mounting offset on the KITTI vehicle (meters).
            T_base_link_to_imu[0:3, 3] = [-2.71 / 2.0 - 0.05, 0.32, 0.93]
            # tf_static
            transforms = [('base_link', imu_frame_id, T_base_link_to_imu),
                          (imu_frame_id, cameras[0][1], inv(kitti.calib.T_cam0_imu)),
                          (imu_frame_id, cameras[1][1], inv(kitti.calib.T_cam1_imu))]
            util = pykitti.utils.read_calib_file(os.path.join(kitti.calib_path, 'calib_cam_to_cam.txt'))
            # Export
            save_static_transforms(bag, transforms, kitti.timestamps)
            save_dynamic_tf(bag, kitti, args.kitti_type, initial_time=None)
            save_imu_data(bag, kitti, imu_frame_id, imu_topic)
            for camera in cameras:
                save_camera_data(bag, args.kitti_type, kitti, util, bridge,
                                 camera=camera[0], camera_frame_id=camera[1], topic=camera[2], initial_time=None)
        finally:
            print("## OVERVIEW ##")
            print(bag)
            bag.close()
    elif args.kitti_type.find("odom") != -1:
        if args.sequence is None:
            print("Sequence option is not given. It is mandatory for odometry dataset.")
            print("Usage for odometry dataset: kitti2bag {odom_color, odom_gray} [dir] -s <sequence>")
            sys.exit(1)
        bag = rosbag.Bag("kitti_data_odometry_{}_sequence_{}.bag".format(args.kitti_type[5:], args.sequence), 'w', compression=compression)
        kitti = pykitti.odometry(args.dir, args.sequence)
        if not os.path.exists(kitti.sequence_path):
            print('Path {} does not exists. Exiting.'.format(kitti.sequence_path))
            sys.exit(1)
        kitti.load_calib()
        kitti.load_timestamps()
        if len(kitti.timestamps) == 0:
            print('Dataset is empty? Exiting.')
            sys.exit(1)
        # Only sequences 00-10 ship ground-truth poses.
        if args.sequence in odometry_sequences[:11]:
            print("Odometry dataset sequence {} has ground truth information (poses).".format(args.sequence))
            kitti.load_poses()
        try:
            util = pykitti.utils.read_calib_file(os.path.join(args.dir, 'sequences', args.sequence, 'calib.txt'))
            current_epoch = (datetime.utcnow() - datetime(1970, 1, 1)).total_seconds()
            # Export
            if args.kitti_type.find("gray") != -1:
                used_cameras = cameras[:2]
            elif args.kitti_type.find("color") != -1:
                used_cameras = cameras[-2:]
            save_dynamic_tf(bag, kitti, args.kitti_type, initial_time=current_epoch)
            for camera in used_cameras:
                save_camera_data(bag, args.kitti_type, kitti, util, bridge,
                                 camera=camera[0], camera_frame_id=camera[1], topic=camera[2], initial_time=current_epoch)
        finally:
            print("## OVERVIEW ##")
            print(bag)
            bag.close()
# Script entry point: only run the converter when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    run_kitti2bag()

查看topic:

rosbag info kitti_2011_10_03_drive_0042_synced.bag

（原文此处为截图：`rosbag info` 的输出，列出了 bag 中包含的各个 topic 及其消息数量）
2、在msckf_vio里面新建launch文件和yaml文件,注意旋转矩阵需要修改。


http://chatgpt.dhexx.cn/article/wegWZv7C.shtml

相关文章

深蓝学院-手写VIO作业-第二章

文章目录 一、基础作业，必做环境配置说明a. ROS环境搭建b. Matlab安装 1、设置IMU 仿真代码中的不同的参数，生成Allan 方差标定曲线。a. 对于非ROS：生成运动imu数据b.对于ROS: 专门生成静止 imu 数据，用于 allan 方差标定 2、将IM…

【代码阅读】PL-VIO

〇、写在前面 PL-VIO采用的通信是ROS，所以并不能像ORBSLAM那样按照执行顺序来理顺，因为ORBSLAM是有一个真正意义上的主函数的，经过CMakeList的编辑产生的可执行文件会有一个开始，但是PL-VIO用的是ROS，其内部通信是节点…

VIO:飞行机器人单目VIO算法测评

转：https://blog.csdn.net/u012348774/article/details/81414264 泡泡图灵智库，带你精读机器人顶级会议文章 标题：A Benchmark Comparison of Monocular Visual-Inertial Odometry Algorithms for Flying Robots 作者：Jeffrey De…

VIO学习笔记一

1. IMU（Inertial Measurement Unit，惯性测量单元） 典型6轴IMU以较高频率（≥100Hz）返回被测量物体的角速度与加速度。受自身温度、零偏、振动等因素干扰，积分得到的平移和旋转容易漂移。IMU本身由一个陀螺仪…

VIO仿真

使用turtlebot3仿真，发现gazebo的imu没有重力加速度。放弃。还是使用公开数据集。 使用这个仿真 vio_data_simulation/src at ros_version HeYijia/vio_data_simulation GitHub 看一下如何用这个仿真跑起来vio。 -- 将特征点反投回图像，…

3.4.1 VIO虚拟以太网原理

最后更新2021/08/12 VIO Server在此完全实现了一个标准的以太网交换机的功能&#xff0c;现在业界都有了高大上的名称&#xff1a;SDN&#xff08;Software Defined Network&#xff09;&#xff0c;如果没有足够的网络知识&#xff08;幸好只是网络链路层&#xff09;&#x…

Vivado调用VIO核

文章目录 前言一、IP核的介绍二、VIO核1.作用2.调用方法 总结 前言 提示&#xff1a;本篇文章所使用的软件为Vivado2018.3&#xff1a; 以四选一数据选择器为例&#xff0c;使用verilog hdl语言以及Vivado自带的VIO,IP来实现功能 提示&#xff1a;以下是本篇文章正文内容&…

海思3519 VIO Sample例程讲解

海思VIO Sample例程讲解 海思SDK解压出来后&#xff0c;Sample包含各个功能模块的历程&#xff0c;本篇讲解VIO Sample历程。 进入VIO模块可以看到&#xff0c;VIO的main函数文件&#xff0c;先从main函数执行程序。 进入文件后首先看下VIO实现的功能&#xff0c;可以看到VIO…

PL-VIO论文阅读

PL-VIO: Tightly-Coupled Monocular Visual–Inertial Odometry Using Point and Line Features Yijia He 1,2,* , Ji Zhao 3, Yue Guo 1,2, Wenhao He 1 and Kui Yuan 1 2018 摘要 To address the problem of estimating camera trajectory and to build a structural 3D m…

DM-VIO简析

今天主要是针对DMVIO/DM-VIO的简析&#xff0c;中文网上有的东西都太少了&#xff0c;只能靠看完论文和组员们一起改代码。Lukas组这个东西在中文网被称为有史以来最好的VIO&#xff0c;但是实际过程中我们还是发现了许多不完美的地方。。。(比如ZUPT更新改造中该有的问题仍然在…

VIOSLAM 综述

文章目录 1.VIO 松耦合/紧耦合。2. 相机和IMU的缺点及互补性3. VIO融合算法流程及其模块分解:4. VIO 算法核心:5. 实验结果与总结:6. 参考文献: 1.VIO 松耦合/紧耦合。 Visual-Inertial Odometry&#xff08;VIO&#xff09;即视觉惯性里程计&#xff0c;有时也叫视觉惯性系统…

VIO系统介绍

VIO（visual-inertial odometry）即视觉惯性里程计，有时也叫视觉惯性系统（VINS，visual-inertial system），是融合相机和IMU数据实现SLAM的算法，根据融合框架的区别又分为紧耦合和松耦合…

vivado VIO (virtual input output)虚拟IO的使用

转自：https://blog.csdn.net/wordwarwordwar/article/details/77150930 一般情况下ILA和VIO都是用在chipscope上使用，VIO可以作为在chipscope时模拟IO。 譬如：在使用chipscope时需要使用按键触发，但是没有设计按键或者板子不在身…

【Vivado那些事儿】-VIO原理及应用

虚拟输入输出（Virtual Input Output, VIO）核是一个可定制的IP核，它可用于实时监视和驱动内部FPGA的信号，如图所示。 可以定制VIO的输入和输出端口的数量与宽度，用于和FPGA设计进行连接。由于VIO核与被监视和驱动的设计同步…

python logger.exception_Python logging设置和logger解析

一、logging模块讲解 1.函数:logging.basicConfig() 参数讲解: (1)level代表高于或者等于这个值时,那么我们才会记录这条日志 (2)filename代表日志会写在这个文件之中,如果没有这个字段则会显示在控制台上 (3)format代表我们的日志显示的格式自定义,如果字段为空,那么默认…

Logger 基本用法

Logger 基本用法 简介 Simple, pretty and powerful logger for android 为Android提供的&#xff0c;简单、强大而且格式美观的工具 本质就是封装系统提供的Log类&#xff0c;加上一些分割线易于查找不同的Log&#xff1b;logcat中显示的信息可配置。最初的样子如下图 包含…

【Logback】<logger>、<root>标签详解

文章目录 背景一、\<logger>使用1.1、使用示例1.1、属性配置说明 & 演示1.1.1、name1.1.2、level1.1.3、additivity1.1.3.1、效果演示&#xff1a;additivitytrue1.1.3.1、效果演示&#xff1a;additivity"false" 1.2 appender-ref 二、\<root>使用2…

python之logger

import logging import os.path import time def test_log():"""指定保存日志的文件路径&#xff0c;日志级别&#xff0c;以及调用文件将日志存入到指定的文件中:paramlogger:"""# 创建一个loggerlogger logging.getLogger()logger.setLevel(l…

Python中logger日志模块详解

1 logging模块简介 logging模块是Python内置的标准模块&#xff0c;主要用于输出运行日志&#xff0c;可以设置输出日志的等级、日志保存路径、日志文件回滚等&#xff1b;相比print&#xff0c;具备如下优点&#xff1a; 可以通过设置不同的日志等级&#xff0c;在release版…

logger:一款管理日志的Python神器

最近要新开一个项目&#xff0c;需要配个 logger 来管理日志&#xff0c;今天分享一下&#xff0c;喜欢记得点赞、关注、收藏。 【注】文末提供交流互助群 import logging ori_logger logging.getLogger(custom_logger) ori_logger.setLevel(logging.INFO) ori_logger.addHa…