diff --git a/ReadMe.md b/ReadMe.md
index 1448a49c1..a058e68f4 100644
--- a/ReadMe.md
+++ b/ReadMe.md
@@ -1,8 +1,11 @@
-# OpenVINS
+# OpenVINS (fork)
 
-[![ROS 1 Workflow](https://github.com/rpng/open_vins/actions/workflows/build_ros1.yml/badge.svg)](https://github.com/rpng/open_vins/actions/workflows/build_ros1.yml)
-[![ROS 2 Workflow](https://github.com/rpng/open_vins/actions/workflows/build_ros2.yml/badge.svg)](https://github.com/rpng/open_vins/actions/workflows/build_ros2.yml)
-[![ROS Free Workflow](https://github.com/rpng/open_vins/actions/workflows/build.yml/badge.svg)](https://github.com/rpng/open_vins/actions/workflows/build.yml)
+## Changes from the original repository
+* IMU and camera topics are resolved under the `/UAV_NAME/` namespace; the topic names are set in the launch files
+* flipped the published TF tree to be compatible with the [MRS system TF tree](https://ctu-mrs.github.io/docs/system/frames_of_reference.html)
+* added custom configs and launch files
+
+## Original readme
 
 Welcome to the OpenVINS project! The OpenVINS project houses some core computer
 vision code along with a state-of-the-art filter-based visual-inertial
diff --git a/config/bluefox_imu/estimator_config.yaml b/config/bluefox_imu/estimator_config.yaml
new file mode 100644
index 000000000..7f036cd87
--- /dev/null
+++ b/config/bluefox_imu/estimator_config.yaml
@@ -0,0 +1,126 @@
+%YAML:1.0 # need to specify the file type at the top!
+
+verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
+
+use_fej: true # if first-estimate Jacobians should be used (enable for good consistency)
+integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used)
+use_stereo: false # if we have more than 1 camera, if we should try to track stereo constraints between pairs
+max_cameras: 1 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking)
+
+calib_cam_extrinsics: true # if the transform between camera and IMU should be optimized R_ItoC, p_CinI
+calib_cam_intrinsics: true # if camera intrinsics should be optimized (focal, center, distortion)
+calib_cam_timeoffset: true # if timeoffset between camera and IMU should be optimized
+calib_imu_intrinsics: false # if imu intrinsics should be calibrated (rotation and skew-scale matrix)
+calib_imu_g_sensitivity: false # if gyroscope gravity sensitivity (Tg) should be calibrated
+
+max_clones: 11 # how many clones in the sliding window
+max_slam: 100 # number of features in our state vector
+max_slam_in_update: 25 # update can be split into sequential updates of batches, how many in a batch
+max_msckf_in_update: 50 # how many MSCKF features to use in the update
+dt_slam_delay: 3 # delay before initializing (helps with stability from bad initialization...)
+
+gravity_mag: 9.81 # magnitude of gravity in this location
+
+feat_rep_msckf: "GLOBAL_3D"
+feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
+feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
+# feat_rep_slam: "ANCHORED_FULL_INVERSE_DEPTH"
+# feat_rep_aruco: "ANCHORED_FULL_INVERSE_DEPTH"
+
+# zero velocity update parameters we can use
+# we support either IMU-based or disparity detection.
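+# NOTE: with try_zupt set to false the remaining zupt_* values are ignored.
+# A hedged disparity-only example (illustrative values, not tuned for this rig):
+# try_zupt: true
+# zupt_chi2_multipler: 0    # 0 disables the IMU chi2 check -> disparity-only
+# zupt_max_disparity: 1.0   # pixels; must stay > 0 for the image-based check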
+try_zupt: false
+zupt_chi2_multipler: 0 # set to 0 for only disp-based
+zupt_max_velocity: 0.1
+zupt_noise_multiplier: 50
+zupt_max_disparity: 1.5 # set to 0 for only imu-based
+zupt_only_at_beginning: true
+
+# ==================================================================
+# ==================================================================
+
+init_window_time: 2.0 # how many seconds to collect initialization information
+init_imu_thresh: 0.3 # threshold for variance of the accelerometer to detect a "jerk" in motion
+init_max_disparity: 2.0 # max disparity to consider the platform stationary (dependent on resolution)
+init_max_features: 75 # how many features to track during initialization (saves on computation)
+
+init_dyn_use: false # if dynamic initialization should be used
+init_dyn_mle_opt_calib: false # if we should optimize calibration during initialization (not recommended)
+init_dyn_mle_max_iter: 50 # how many iterations the MLE refinement should use (zero to skip the MLE)
+init_dyn_mle_max_time: 0.05 # how many seconds the MLE should be completed in
+init_dyn_mle_max_threads: 6 # how many threads the MLE should use
+init_dyn_num_pose: 6 # number of poses to use within our window time (evenly spaced)
+init_dyn_min_deg: 10.0 # orientation change needed to try to init
+
+init_dyn_inflation_ori: 10 # what to inflate the recovered q_GtoI covariance by
+init_dyn_inflation_vel: 100 # what to inflate the recovered v_IinG covariance by
+init_dyn_inflation_bg: 10 # what to inflate the recovered bias_g covariance by
+init_dyn_inflation_ba: 100 # what to inflate the recovered bias_a covariance by
+init_dyn_min_rec_cond: 1e-12 # reciprocal condition number thresh for info inversion
+
+init_dyn_bias_g: [ 0.0, 0.0, 0.0 ] # initial gyroscope bias guess
+init_dyn_bias_a: [ 0.0, 0.0, 0.0 ] # initial accelerometer bias guess
+
+# ==================================================================
+# ==================================================================
+
+record_timing_information: false # if we want to record timing information of the method
+record_timing_filepath: "/tmp/traj_timing.txt" # https://docs.openvins.com/eval-timing.html#eval-ov-timing-flame
+
+# if we want to save the simulation state and its diagonal covariance
+# use this with rosrun ov_eval error_simulation
+save_total_state: false
+filepath_est: "/tmp/ov_estimate.txt"
+filepath_std: "/tmp/ov_estimate_std.txt"
+filepath_gt: "/tmp/ov_groundtruth.txt"
+
+# ==================================================================
+# ==================================================================
+
+# our front-end feature tracking parameters
+# we have a KLT and a descriptor-based tracker (KLT is better implemented...)
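+# A hedged descriptor-based variant (sketch only; KLT is the tested default here)
+# would flip the flag below and rely on knn_ratio for match filtering:
+# use_klt: false
+# knn_ratio: 0.65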
+use_klt: true # if true we will use KLT, otherwise use a ORB descriptor + robust matching +num_pts: 400 # number of points (per camera) we will extract and try to track +# num_pts: 200 # number of points (per camera) we will extract and try to track +# fast_threshold: 20 # threshold for fast extraction (warning: lower threshs can be expensive) +fast_threshold: 15 # threshold for fast extraction (warning: lower threshs can be expensive) +grid_x: 5 # extraction sub-grid count for horizontal direction (uniform tracking) +# grid_y: 5 # extraction sub-grid count for vertical direction (uniform tracking) +grid_y: 3 # extraction sub-grid count for vertical direction (uniform tracking) +# min_px_dist: 10 # distance between features (features near each other provide less information) +min_px_dist: 20 # distance between features (features near each other provide less information) +knn_ratio: 0.70 # descriptor knn threshold for the top two descriptor matches +track_frequency: 21.0 # frequency we will perform feature tracking at (in frames per second / hertz) +downsample_cameras: false # will downsample image in half if true +num_opencv_threads: 4 # -1: auto, 0-1: serial, >1: number of threads +histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE + +# aruco tag tracker for the system +# DICT_6X6_1000 from https://chev.me/arucogen/ +use_aruco: false +num_aruco: 1024 +downsize_aruco: true + +# ================================================================== +# ================================================================== + +# camera noises and chi-squared threshold multipliers +up_msckf_sigma_px: 1 +up_msckf_chi2_multipler: 1 +up_slam_sigma_px: 1 +up_slam_chi2_multipler: 1 +up_aruco_sigma_px: 1 +up_aruco_chi2_multipler: 1 + +# masks for our images +use_mask: false + +# imu and camera spacial-temporal +# imu config should also have the correct noise values +relative_config_imu: "kalibr_imu_chain.yaml" +relative_config_imucam: "kalibr_imucam_chain.yaml" + + + + diff --git a/config/bluefox_imu/kalibr_imu_chain.yaml b/config/bluefox_imu/kalibr_imu_chain.yaml new file mode 100644 index 000000000..2003100d0 --- /dev/null +++ b/config/bluefox_imu/kalibr_imu_chain.yaml @@ -0,0 +1,45 @@ +%YAML:1.0 + +imu0: + T_i_b: + - [1.0, 0.0, 0.0, 0.0] + - [0.0, 1.0, 0.0, 0.0] + - [0.0, 0.0, 1.0, 0.0] + - [0.0, 0.0, 0.0, 1.0] + + accelerometer_noise_density: 2.0000e-3 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" ) + accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. 
( accel bias diffusion ) + gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) + gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) + # rostopic: /uav35/vio_imu/imu_filtered + time_offset: 0.0 + update_rate: 1000.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] diff --git a/config/bluefox_imu/kalibr_imucam_chain.yaml b/config/bluefox_imu/kalibr_imucam_chain.yaml new file mode 100644 index 000000000..514058be0 --- /dev/null +++ b/config/bluefox_imu/kalibr_imucam_chain.yaml @@ -0,0 +1,15 @@ +%YAML:1.0 + +cam0: + T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI + - [1, 0, 0, 0] + - [0, 1, 0, 0] + - [0, 0, 1, 0.007] + - [0.0, 0.0, 0.0, 1.0] + cam_overlaps: [0] + camera_model: pinhole + distortion_coeffs: [-0.007054279635535503,-0.013010999471709818,0.007672805064263297,-0.0034608152598251778] + distortion_model: equidistant + intrinsics: [218.62328342963636,218.84544019573178, 374.186243114997, 203.57046644527676] #fu, fv, cu, cv + resolution: [752, 480] + # rostopic: /mv_25003659/image_raw diff --git a/config/bluefox_imu_simulation/estimator_config.yaml b/config/bluefox_imu_simulation/estimator_config.yaml new file mode 100644 index 000000000..dad761b22 --- /dev/null +++ b/config/bluefox_imu_simulation/estimator_config.yaml @@ -0,0 +1,126 @@ +%YAML:1.0 # need to specify the file type at the top! + +verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT + +use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) +use_stereo: false # if we have more than 1 camera, if we should try to track stereo constraints between pairs +max_cameras: 1 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) + +calib_cam_extrinsics: true # if the transform between camera and IMU should be optimized R_ItoC, p_CinI +calib_cam_intrinsics: true # if camera intrinsics should be optimized (focal, center, distortion) +calib_cam_timeoffset: true # if timeoffset between camera and IMU should be optimized +calib_imu_intrinsics: false # if imu intrinsics should be calibrated (rotation and skew-scale matrix) +calib_imu_g_sensitivity: false # if gyroscope gravity sensitivity (Tg) should be calibrated + +max_clones: 11 # how many clones in the sliding window +max_slam: 100 # number of features in our state vector +max_slam_in_update: 25 # update can be split into sequential updates of batches, how many in a batch +max_msckf_in_update: 50 # how many MSCKF features to use in the update +dt_slam_delay: 3 # delay before initializing (helps with stability from bad initialization...) 
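+# NOTE: this simulation config mirrors config/bluefox_imu/estimator_config.yaml;
+# the intended deltas are init_imu_thresh (0.05 here vs 0.3 on hardware) and the
+# IMU update_rate in kalibr_imu_chain.yaml (125 Hz here vs 1000 Hz on hardware).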
+
+gravity_mag: 9.81 # magnitude of gravity in this location
+
+feat_rep_msckf: "GLOBAL_3D"
+feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
+feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
+# feat_rep_slam: "ANCHORED_FULL_INVERSE_DEPTH"
+# feat_rep_aruco: "ANCHORED_FULL_INVERSE_DEPTH"
+
+# zero velocity update parameters we can use
+# we support either IMU-based or disparity detection.
+try_zupt: false
+zupt_chi2_multipler: 0 # set to 0 for only disp-based
+zupt_max_velocity: 0.1
+zupt_noise_multiplier: 50
+zupt_max_disparity: 1.5 # set to 0 for only imu-based
+zupt_only_at_beginning: true
+
+# ==================================================================
+# ==================================================================
+
+init_window_time: 2.0 # how many seconds to collect initialization information
+init_imu_thresh: 0.05 # threshold for variance of the accelerometer to detect a "jerk" in motion
+init_max_disparity: 2.0 # max disparity to consider the platform stationary (dependent on resolution)
+init_max_features: 75 # how many features to track during initialization (saves on computation)
+
+init_dyn_use: false # if dynamic initialization should be used
+init_dyn_mle_opt_calib: false # if we should optimize calibration during initialization (not recommended)
+init_dyn_mle_max_iter: 50 # how many iterations the MLE refinement should use (zero to skip the MLE)
+init_dyn_mle_max_time: 0.05 # how many seconds the MLE should be completed in
+init_dyn_mle_max_threads: 6 # how many threads the MLE should use
+init_dyn_num_pose: 6 # number of poses to use within our window time (evenly spaced)
+init_dyn_min_deg: 10.0 # orientation change needed to try to init
+
+init_dyn_inflation_ori: 10 # what to inflate the recovered q_GtoI covariance by
+init_dyn_inflation_vel: 100 # what to inflate the recovered v_IinG covariance by
+init_dyn_inflation_bg: 10 # what to inflate the recovered bias_g covariance by
+init_dyn_inflation_ba: 100 # what to inflate the recovered bias_a covariance by
+init_dyn_min_rec_cond: 1e-12 # reciprocal condition number thresh for info inversion
+
+init_dyn_bias_g: [ 0.0, 0.0, 0.0 ] # initial gyroscope bias guess
+init_dyn_bias_a: [ 0.0, 0.0, 0.0 ] # initial accelerometer bias guess
+
+# ==================================================================
+# ==================================================================
+
+record_timing_information: false # if we want to record timing information of the method
+record_timing_filepath: "/tmp/traj_timing.txt" # https://docs.openvins.com/eval-timing.html#eval-ov-timing-flame
+
+# if we want to save the simulation state and its diagonal covariance
+# use this with rosrun ov_eval error_simulation
+save_total_state: false
+filepath_est: "/tmp/ov_estimate.txt"
+filepath_std: "/tmp/ov_estimate_std.txt"
+filepath_gt: "/tmp/ov_groundtruth.txt"
+
+# ==================================================================
+# ==================================================================
+
+# our front-end feature tracking parameters
+# we have a KLT and a descriptor-based tracker (KLT is better implemented...)
+use_klt: true # if true we will use KLT, otherwise use a ORB descriptor + robust matching +num_pts: 400 # number of points (per camera) we will extract and try to track +# num_pts: 200 # number of points (per camera) we will extract and try to track +# fast_threshold: 20 # threshold for fast extraction (warning: lower threshs can be expensive) +fast_threshold: 15 # threshold for fast extraction (warning: lower threshs can be expensive) +grid_x: 5 # extraction sub-grid count for horizontal direction (uniform tracking) +# grid_y: 5 # extraction sub-grid count for vertical direction (uniform tracking) +grid_y: 3 # extraction sub-grid count for vertical direction (uniform tracking) +# min_px_dist: 10 # distance between features (features near each other provide less information) +min_px_dist: 20 # distance between features (features near each other provide less information) +knn_ratio: 0.70 # descriptor knn threshold for the top two descriptor matches +track_frequency: 21.0 # frequency we will perform feature tracking at (in frames per second / hertz) +downsample_cameras: false # will downsample image in half if true +num_opencv_threads: 4 # -1: auto, 0-1: serial, >1: number of threads +histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE + +# aruco tag tracker for the system +# DICT_6X6_1000 from https://chev.me/arucogen/ +use_aruco: false +num_aruco: 1024 +downsize_aruco: true + +# ================================================================== +# ================================================================== + +# camera noises and chi-squared threshold multipliers +up_msckf_sigma_px: 1 +up_msckf_chi2_multipler: 1 +up_slam_sigma_px: 1 +up_slam_chi2_multipler: 1 +up_aruco_sigma_px: 1 +up_aruco_chi2_multipler: 1 + +# masks for our images +use_mask: false + +# imu and camera spacial-temporal +# imu config should also have the correct noise values +relative_config_imu: "kalibr_imu_chain.yaml" +relative_config_imucam: "kalibr_imucam_chain.yaml" + + + + diff --git a/config/bluefox_imu_simulation/kalibr_imu_chain.yaml b/config/bluefox_imu_simulation/kalibr_imu_chain.yaml new file mode 100644 index 000000000..438336eb6 --- /dev/null +++ b/config/bluefox_imu_simulation/kalibr_imu_chain.yaml @@ -0,0 +1,45 @@ +%YAML:1.0 + +imu0: + T_i_b: + - [1.0, 0.0, 0.0, 0.0] + - [0.0, 1.0, 0.0, 0.0] + - [0.0, 0.0, 1.0, 0.0] + - [0.0, 0.0, 0.0, 1.0] + accelerometer_noise_density: 2.0000e-3 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" ) + accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. 
( accel bias diffusion ) + gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) + gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) + # rostopic: /uav0/vio/imu + time_offset: 0.0 + # update_rate: 200.0 + update_rate: 125.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] diff --git a/config/bluefox_imu_simulation/kalibr_imucam_chain.yaml b/config/bluefox_imu_simulation/kalibr_imucam_chain.yaml new file mode 100644 index 000000000..0f936d661 --- /dev/null +++ b/config/bluefox_imu_simulation/kalibr_imucam_chain.yaml @@ -0,0 +1,15 @@ +%YAML:1.0 + +cam0: + T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI + - [1, 0, 0, 0] + - [0, 1, 0, 0] + - [0, 0, 1, 0.000] + - [0.0, 0.0, 0.0, 1.0] + cam_overlaps: [0] + camera_model: pinhole + distortion_coeffs: [0.019265981371039506, 0.0011428473998276235, -0.0003811659324868097, 6.340084698783884e-05] + distortion_model: equidistant + intrinsics: [227.4010064226358,227.35879407313246,375.5302935901654,239.4881944649193] #fu, fv, cu, cv + resolution: [752, 480] + # rostopic: /uav0/vio/camera/image_raw diff --git a/config/t265/estimator_config.yaml b/config/t265/estimator_config.yaml new file mode 100644 index 000000000..0f0079c12 --- /dev/null +++ b/config/t265/estimator_config.yaml @@ -0,0 +1,125 @@ +%YAML:1.0 # need to specify the file type at the top! + +verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT + +use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) +use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints between pairs +max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) + +calib_cam_extrinsics: true # if the transform between camera and IMU should be optimized R_ItoC, p_CinI +calib_cam_intrinsics: true # if camera intrinsics should be optimized (focal, center, distortion) +calib_cam_timeoffset: true # if timeoffset between camera and IMU should be optimized +calib_imu_intrinsics: false # if imu intrinsics should be calibrated (rotation and skew-scale matrix) +calib_imu_g_sensitivity: false # if gyroscope gravity sensitivity (Tg) should be calibrated + +max_clones: 11 # how many clones in the sliding window +max_slam: 100 # number of features in our state vector +max_slam_in_update: 25 # update can be split into sequential updates of batches, how many in a batch +max_msckf_in_update: 50 # how many MSCKF features to use in the update +dt_slam_delay: 3 # delay before initializing (helps with stability from bad initialization...) 
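+# NOTE: unlike the bluefox configs, this pipeline runs stereo (use_stereo: true
+# and max_cameras: 2 above); both fisheye cameras are defined in
+# kalibr_imucam_chain.yaml.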
+
+gravity_mag: 9.81 # magnitude of gravity in this location
+
+feat_rep_msckf: "GLOBAL_3D"
+feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
+feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
+# feat_rep_slam: "ANCHORED_FULL_INVERSE_DEPTH"
+# feat_rep_aruco: "ANCHORED_FULL_INVERSE_DEPTH"
+
+# zero velocity update parameters we can use
+# we support either IMU-based or disparity detection.
+try_zupt: false
+zupt_chi2_multipler: 0 # set to 0 for only disp-based
+zupt_max_velocity: 0.1
+zupt_noise_multiplier: 50
+zupt_max_disparity: 1.5 # set to 0 for only imu-based
+zupt_only_at_beginning: true
+
+# ==================================================================
+# ==================================================================
+
+init_window_time: 2.0 # how many seconds to collect initialization information
+init_imu_thresh: 0.3 # threshold for variance of the accelerometer to detect a "jerk" in motion
+init_max_disparity: 2.0 # max disparity to consider the platform stationary (dependent on resolution)
+init_max_features: 75 # how many features to track during initialization (saves on computation)
+
+init_dyn_use: false # if dynamic initialization should be used
+init_dyn_mle_opt_calib: false # if we should optimize calibration during initialization (not recommended)
+init_dyn_mle_max_iter: 50 # how many iterations the MLE refinement should use (zero to skip the MLE)
+init_dyn_mle_max_time: 0.05 # how many seconds the MLE should be completed in
+init_dyn_mle_max_threads: 6 # how many threads the MLE should use
+init_dyn_num_pose: 6 # number of poses to use within our window time (evenly spaced)
+init_dyn_min_deg: 10.0 # orientation change needed to try to init
+
+init_dyn_inflation_ori: 10 # what to inflate the recovered q_GtoI covariance by
+init_dyn_inflation_vel: 100 # what to inflate the recovered v_IinG covariance by
+init_dyn_inflation_bg: 10 # what to inflate the recovered bias_g covariance by
+init_dyn_inflation_ba: 100 # what to inflate the recovered bias_a covariance by
+init_dyn_min_rec_cond: 1e-12 # reciprocal condition number thresh for info inversion
+
+init_dyn_bias_g: [ 0.0, 0.0, 0.0 ] # initial gyroscope bias guess
+init_dyn_bias_a: [ 0.0, 0.0, 0.0 ] # initial accelerometer bias guess
+
+# ==================================================================
+# ==================================================================
+
+record_timing_information: false # if we want to record timing information of the method
+record_timing_filepath: "/tmp/traj_timing.txt" # https://docs.openvins.com/eval-timing.html#eval-ov-timing-flame
+
+# if we want to save the simulation state and its diagonal covariance
+# use this with rosrun ov_eval error_simulation
+save_total_state: false
+filepath_est: "/tmp/ov_estimate.txt"
+filepath_std: "/tmp/ov_estimate_std.txt"
+filepath_gt: "/tmp/ov_groundtruth.txt"
+
+# ==================================================================
+# ==================================================================
+
+# our front-end feature tracking parameters
+# we have a KLT and a descriptor-based tracker (KLT is better implemented...)
+use_klt: true # if true we will use KLT, otherwise use a ORB descriptor + robust matching +num_pts: 400 # number of points (per camera) we will extract and try to track +# num_pts: 200 # number of points (per camera) we will extract and try to track +# fast_threshold: 20 # threshold for fast extraction (warning: lower threshs can be expensive) +fast_threshold: 15 # threshold for fast extraction (warning: lower threshs can be expensive) +grid_x: 5 # extraction sub-grid count for horizontal direction (uniform tracking) +# grid_y: 5 # extraction sub-grid count for vertical direction (uniform tracking) +grid_y: 3 # extraction sub-grid count for vertical direction (uniform tracking) +# min_px_dist: 10 # distance between features (features near each other provide less information) +min_px_dist: 20 # distance between features (features near each other provide less information) +knn_ratio: 0.70 # descriptor knn threshold for the top two descriptor matches +track_frequency: 21.0 # frequency we will perform feature tracking at (in frames per second / hertz) +downsample_cameras: false # will downsample image in half if true +num_opencv_threads: 4 # -1: auto, 0-1: serial, >1: number of threads +histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE + +# aruco tag tracker for the system +# DICT_6X6_1000 from https://chev.me/arucogen/ +use_aruco: false +num_aruco: 1024 +downsize_aruco: true + +# ================================================================== +# ================================================================== + +# camera noises and chi-squared threshold multipliers +up_msckf_sigma_px: 1 +up_msckf_chi2_multipler: 1 +up_slam_sigma_px: 1 +up_slam_chi2_multipler: 1 +up_aruco_sigma_px: 1 +up_aruco_chi2_multipler: 1 + +# masks for our images +use_mask: false + +# imu and camera spacial-temporal +# imu config should also have the correct noise values +relative_config_imu: "kalibr_imu_chain.yaml" +relative_config_imucam: "kalibr_imucam_chain.yaml" + + + + diff --git a/config/t265/kalibr_imu_chain.yaml b/config/t265/kalibr_imu_chain.yaml new file mode 100644 index 000000000..9f737d229 --- /dev/null +++ b/config/t265/kalibr_imu_chain.yaml @@ -0,0 +1,46 @@ +%YAML:1.0 + +imu0: + T_i_b: + - [1.0, 0.0, 0.0, 0.0] + - [0.0, 1.0, 0.0, 0.0] + - [0.0, 0.0, 1.0, 0.0] + - [0.0, 0.0, 0.0, 1.0] + accelerometer_noise_density: 2.0000e-3 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" ) + accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. 
( accel bias diffusion ) + gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) + gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) + # rostopic: /uav35/vio_imu/imu_filtered + time_offset: 0.0 + update_rate: 200.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + + diff --git a/config/t265/kalibr_imucam_chain.yaml b/config/t265/kalibr_imucam_chain.yaml new file mode 100644 index 000000000..babd79efe --- /dev/null +++ b/config/t265/kalibr_imucam_chain.yaml @@ -0,0 +1,28 @@ +%YAML:1.0 + +cam0: + T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI + - [-0.99988, 0.00102315, 0.0154821, 0.0106987114995718] + - [-0.00100478, -0.999999, 0.00119407, 1.07511405076366e-05] + - [0.0154833, 0.00117837, 0.999879, -0.000165671401191503] + - [0.0, 0.0, 0.0, 1.0] + cam_overlaps: [1] + camera_model: pinhole + distortion_coeffs: [-0.00413605011999607, 0.0385973118245602, -0.0369558110833168, 0.00608701910823584] + distortion_model: equidistant + intrinsics: [284.378601074219,285.364898681641, 423.223510742188, 402.276000976562] #fu, fv, cu, cv + resolution: [848, 800] + # rostopic: /uav35/t265/fisheye1/image_raw +cam1: + T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI + - [-0.999979, -0.0022358, 0.00603111, -0.0537273213267326] + - [0.00226225, -0.999988, 0.00438268, -0.000109005901322234] + - [0.00602124, 0.00439623, 0.999972, 0.00031432809191756] + - [0.0, 0.0, 0.0, 1.0] + cam_overlaps: [0] + camera_model: pinhole + distortion_coeffs: [-0.00312783196568489, 0.0387581698596478, -0.0367337986826897, 0.00606365082785487] + distortion_model: equidistant + intrinsics: [285.629211425781, 286.654602050781, 433.148193359375, 403.567413330078] #fu, fv, cu, cv + resolution: [848, 800] + # rostopic: /uav35/t265/fisheye2/image_raw diff --git a/installation/install.sh b/installation/install.sh new file mode 100755 index 000000000..b66e1009c --- /dev/null +++ b/installation/install.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +default=n + +# get the path to this script +MY_PATH=`dirname "$0"` +MY_PATH=`( cd "$MY_PATH" && pwd )` +cd "$MY_PATH" + +CERES_PATH=$MY_PATH/../lib +CERES_VERSION=1.14.0 + +# IMPORTANT: These variables should match the settings of your catkin workspace +PROFILE="RelWithDebInfo" # RelWithDebInfo, Release, Debug +BUILD_WITH_MARCH_NATIVE=false +if [ ! -z "$PCL_CROSS_COMPILATION" ]; then + BUILD_WITH_MARCH_NATIVE=$PCL_CROSS_COMPILATION +fi +CMAKE_STANDARD=17 + +#Ceres Solver +while true; do + [[ -t 0 ]] && { read -t 5 -n 2 -p $'\e[1;32mInstall Ceres-Solver(required for OpenVINS)? 
[y/n] (default: '"$default"$')\e[0m\n' resp || resp=$default ; } + response=`echo $resp | sed -r 's/(.*)$/\1=/'` + + if [[ $response =~ ^(y|Y)=$ ]] + then + cd ~/git + + echo "Installing ceres solver" + # Ceres installation - dependencies + + # CMake + sudo apt-get -y install cmake + # google-glog + gflags + sudo apt-get -y install libgoogle-glog-dev + # BLAS & LAPACK + sudo apt-get -y install libatlas-base-dev + # Eigen3 + sudo apt-get -y install libeigen3-dev + # SuiteSparse and CXSparse (optional) + # - If you want to build Ceres as a *static* library (the default) + # you can use the SuiteSparse package in the main Ubuntu package + # repository: + sudo apt-get -y install libsuitesparse-dev + # - However, if you want to build Ceres as a *shared* library, you must + # add the following PPA: + # sudo add-apt-repository ppa:bzindovic/suitesparse-bugfix-1319687 + # sudo apt-get update + # sudo apt-get install libsuitesparse-dev + + # Build with march native? + if $BUILD_WITH_MARCH_NATIVE; then + echo "Building ceres optimizer with -march=native" + CMAKE_MARCH_NATIVE="-march=native" + else + echo "Building ceres optimizer without -march=native" + CMAKE_MARCH_NATIVE="" + fi + + # Profile-dependent flags + if [[ "$PROFILE" == "RelWithDebInfo" ]]; then + BUILD_FLAGS_PROFILE=( + -DCMAKE_CXX_FLAGS_${PROFILE^^}="-O2 -g" + -DCMAKE_C_FLAGS_${PROFILE^^}="-O2 -g") + elif [[ "$PROFILE" == "Release" ]]; then + BUILD_FLAGS_PROFILE=( + -DCMAKE_CXX_FLAGS_${PROFILE^^}="-O3" + -DCMAKE_C_FLAGS_${PROFILE^^}="-O3") + else + BUILD_FLAGS_PROFILE=( + -DCMAKE_CXX_FLAGS_${PROFILE^^}="-O0 -g" + -DCMAKE_C_FLAGS_${PROFILE^^}="-O0 -g") + fi + + # Defaults taken from mrs_workspace building flags + BUILD_FLAGS_GENERAL=( + -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + -DCMAKE_CXX_STANDARD=$CMAKE_STANDARD + -DCMAKE_BUILD_TYPE=$PROFILE + -DCMAKE_CXX_FLAGS="-std=c++$CMAKE_STANDARD $CMAKE_MARCH_NATIVE" + -DCMAKE_C_FLAGS="$CMAKE_MARCH_NATIVE" + -DBUILD_TESTING=OFF + -DBUILD_DOCUMENTATION=OFF + -DBUILD_BENCHMARKS=OFF + -DBUILD_EXAMPLES=OFF + -DSCHUR_SPECIALIZATIONS=ON + ) + + # download ceres solver + echo "Downloading ceres solver" + [ ! -d $CERES_PATH ] && mkdir -p $CERES_PATH + cd $CERES_PATH + + if [ ! -d $CERES_PATH/ceres-solver-$CERES_VERSION ] + then + # unpack source files + wget -O "$CERES_PATH/ceres-solver-$CERES_VERSION.tar.gz" http://ceres-solver.org/ceres-solver-$CERES_VERSION.tar.gz + tar zxf ceres-solver-$CERES_VERSION.tar.gz + rm -f ceres-solver-$CERES_VERSION.tar.gz + fi + + # install ceres solver + echo "Compiling ceres solver" + cd $CERES_PATH/ceres-solver-$CERES_VERSION + [ ! -d "build" ] && mkdir build + cd build + cmake "${BUILD_FLAGS_GENERAL[@]}" "${BUILD_FLAGS_PROFILE[@]}" ../ + + [ -z "$GITHUB_CI" ] && N_PROC="-j$[$(nproc) - 1]" + [ ! -z "$GITHUB_CI" ] && N_PROC="-j$[$(nproc) / 2]" + + echo "building with $N_PROC processes" + + make ${N_PROC} + sudo make install + + echo "Done installing prerequisities for OpenVINS" + + # remove the ceres solver source and build files + rm -rf $CERES_PATH + + break + elif [[ $response =~ ^(n|N)=$ ]] + then + break + else + echo " What? \"$resp\" is not a correct answer. Try y+Enter." 
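+    # NOTE: the read at the top of this loop times out after 5 s and falls
+    # back to the default answer "n", so unattended runs skip the Ceres
+    # install unless a response is given.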
fi
+done
+
diff --git a/ov_msckf/launch/display.rviz b/ov_msckf/launch/display.rviz
index e847e0593..1991398ab 100644
--- a/ov_msckf/launch/display.rviz
+++ b/ov_msckf/launch/display.rviz
@@ -294,7 +294,7 @@ Visualization Manager:
   Global Options:
     Background Color: 44; 44; 44
     Default Light: true
-    Fixed Frame: global
+    Fixed Frame: world
     Frame Rate: 60
     Name: root
   Tools:
diff --git a/ov_msckf/src/core/RosVisualizer.cpp b/ov_msckf/src/core/RosVisualizer.cpp
new file mode 100644
index 000000000..57c67713e
--- /dev/null
+++ b/ov_msckf/src/core/RosVisualizer.cpp
@@ -0,0 +1,988 @@
+/*
+ * OpenVINS: An Open Platform for Visual-Inertial Research
+ * Copyright (C) 2021 Patrick Geneva
+ * Copyright (C) 2021 Guoquan Huang
+ * Copyright (C) 2021 OpenVINS Contributors
+ * Copyright (C) 2019 Kevin Eckenhoff
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+
+#include "RosVisualizer.h"
+
+using namespace ov_msckf;
+
+RosVisualizer::RosVisualizer(ros::NodeHandle &nh, std::shared_ptr<VioManager> app, std::shared_ptr<Simulator> sim) : _app(app), _sim(sim) {
+
+  // Setup our transform broadcaster
+  mTfBr = new tf::TransformBroadcaster();
+
+  // Setup pose and path publisher
+  pub_poseimu = nh.advertise<geometry_msgs::PoseWithCovarianceStamped>("/ov_msckf/poseimu", 2);
+  ROS_INFO("Publishing: %s", pub_poseimu.getTopic().c_str());
+  pub_odomimu = nh.advertise<nav_msgs::Odometry>("/ov_msckf/odomimu", 2);
+  ROS_INFO("Publishing: %s", pub_odomimu.getTopic().c_str());
+  pub_pathimu = nh.advertise<nav_msgs::Path>("/ov_msckf/pathimu", 2);
+  ROS_INFO("Publishing: %s", pub_pathimu.getTopic().c_str());
+
+  // 3D points publishing
+  pub_points_msckf = nh.advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_msckf", 2);
+  ROS_INFO("Publishing: %s", pub_points_msckf.getTopic().c_str());
+  pub_points_slam = nh.advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_slam", 2);
+  ROS_INFO("Publishing: %s", pub_points_slam.getTopic().c_str());
+  pub_points_aruco = nh.advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_aruco", 2);
+  ROS_INFO("Publishing: %s", pub_points_aruco.getTopic().c_str());
+  pub_points_sim = nh.advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_sim", 2);
+  ROS_INFO("Publishing: %s", pub_points_sim.getTopic().c_str());
+
+  // Our tracking image
+  pub_tracks = nh.advertise<sensor_msgs::Image>("/ov_msckf/trackhist", 2);
+  ROS_INFO("Publishing: %s", pub_tracks.getTopic().c_str());
+
+  // Groundtruth publishers
+  pub_posegt = nh.advertise<geometry_msgs::PoseStamped>("/ov_msckf/posegt", 2);
+  ROS_INFO("Publishing: %s", pub_posegt.getTopic().c_str());
+  pub_pathgt = nh.advertise<nav_msgs::Path>("/ov_msckf/pathgt", 2);
+  ROS_INFO("Publishing: %s", pub_pathgt.getTopic().c_str());
+
+  // Loop closure publishers
+  pub_loop_pose = nh.advertise<nav_msgs::Odometry>("/ov_msckf/loop_pose", 2);
+  pub_loop_point = nh.advertise<sensor_msgs::PointCloud>("/ov_msckf/loop_feats", 2);
+  pub_loop_extrinsic = nh.advertise<nav_msgs::Odometry>("/ov_msckf/loop_extrinsic", 2);
+  pub_loop_intrinsics = nh.advertise<sensor_msgs::CameraInfo>("/ov_msckf/loop_intrinsics", 2);
+  pub_loop_img_depth = nh.advertise<sensor_msgs::Image>("/ov_msckf/loop_depth", 2);
+  pub_loop_img_depth_color = nh.advertise<sensor_msgs::Image>("/ov_msckf/loop_depth_colored", 2);
+
+  // option to enable publishing of global to IMU transformation
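+  // (both flags below default to true when they are not set on the parameter server;
+  // a hypothetical launch override: <param name="publish_calibration_tf" value="false"/>)
+  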
nh.param("publish_global_to_imu_tf", publish_global2imu_tf, true); + nh.param("publish_calibration_tf", publish_calibration_tf, true); + + // Load groundtruth if we have it and are not doing simulation + if (nh.hasParam("path_gt") && _sim == nullptr) { + std::string path_to_gt; + nh.param("path_gt", path_to_gt, ""); + DatasetReader::load_gt_file(path_to_gt, gt_states); + } + + // Load if we should save the total state to file + nh.param("save_total_state", save_total_state, false); + + // If the file is not open, then open the file + if (save_total_state) { + + // files we will open + std::string filepath_est, filepath_std, filepath_gt; + nh.param("filepath_est", filepath_est, "state_estimate.txt"); + nh.param("filepath_std", filepath_std, "state_deviation.txt"); + nh.param("filepath_gt", filepath_gt, "state_groundtruth.txt"); + + // If it exists, then delete it + if (boost::filesystem::exists(filepath_est)) + boost::filesystem::remove(filepath_est); + if (boost::filesystem::exists(filepath_std)) + boost::filesystem::remove(filepath_std); + + // Open the files + of_state_est.open(filepath_est.c_str()); + of_state_std.open(filepath_std.c_str()); + of_state_est << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl; + of_state_std << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl; + + // Groundtruth if we are simulating + if (_sim != nullptr) { + if (boost::filesystem::exists(filepath_gt)) + boost::filesystem::remove(filepath_gt); + of_state_gt.open(filepath_gt.c_str()); + of_state_gt << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl; + } + } +} + +void RosVisualizer::visualize() { + + // Return if we have already visualized + if (last_visualization_timestamp == _app->get_state()->_timestamp) + return; + last_visualization_timestamp = _app->get_state()->_timestamp; + + // Start timing + boost::posix_time::ptime rT0_1, rT0_2; + rT0_1 = boost::posix_time::microsec_clock::local_time(); + + // publish current image + publish_images(); + + // Return if we have not inited + if (!_app->initialized()) + return; + + // Save the start time of this dataset + if (!start_time_set) { + rT1 = boost::posix_time::microsec_clock::local_time(); + start_time_set = true; + } + + // publish state + publish_state(); + + // publish points + publish_features(); + + // Publish gt if we have it + publish_groundtruth(); + + // Publish keyframe information + publish_loopclosure_information(); + + // save total state + if (save_total_state) + sim_save_total_state_to_file(); + + // Print how much time it took to publish / displaying things + rT0_2 = boost::posix_time::microsec_clock::local_time(); + double time_total = (rT0_2 - rT0_1).total_microseconds() * 1e-6; + printf(BLUE "[TIME]: %.4f seconds for visualization\n" RESET, time_total); +} + +void RosVisualizer::visualize_odometry(double timestamp) { + + // Check if we have subscribers + if (pub_odomimu.getNumSubscribers() == 0) + return; + + // Return if we have not inited and a second has passes + if (!_app->initialized() || (timestamp - _app->initialized_time()) < 1) + return; + + // Get fast propagate state at the desired timestamp + std::shared_ptr state = _app->get_state(); + Eigen::Matrix state_plus = Eigen::Matrix::Zero(); + _app->get_propagator()->fast_state_propagate(state, timestamp, state_plus); + + // Our odometry message + nav_msgs::Odometry odomIinM; + odomIinM.header.stamp = 
ros::Time(timestamp); + odomIinM.header.frame_id = "world"; + + // The POSE component (orientation and position) + odomIinM.pose.pose.orientation.x = state_plus(0); + odomIinM.pose.pose.orientation.y = state_plus(1); + odomIinM.pose.pose.orientation.z = state_plus(2); + odomIinM.pose.pose.orientation.w = state_plus(3); + odomIinM.pose.pose.position.x = state_plus(4); + odomIinM.pose.pose.position.y = state_plus(5); + odomIinM.pose.pose.position.z = state_plus(6); + + // Finally set the covariance in the message (in the order position then orientation as per ros convention) + // TODO: this currently is an approximation since this should actually evolve over our propagation period + // TODO: but to save time we only propagate the mean and not the uncertainty, but maybe we should try to prop the covariance? + std::vector> statevars; + statevars.push_back(state->_imu->pose()->p()); + statevars.push_back(state->_imu->pose()->q()); + Eigen::Matrix covariance_posori = StateHelper::get_marginal_covariance(_app->get_state(), statevars); + for (int r = 0; r < 6; r++) { + for (int c = 0; c < 6; c++) { + odomIinM.pose.covariance[6 * r + c] = covariance_posori(r, c); + } + } + + // The TWIST component (angular and linear velocities) + odomIinM.child_frame_id = "imu"; + odomIinM.twist.twist.linear.x = state_plus(7); + odomIinM.twist.twist.linear.y = state_plus(8); + odomIinM.twist.twist.linear.z = state_plus(9); + odomIinM.twist.twist.angular.x = state_plus(10); // we do not estimate this... + odomIinM.twist.twist.angular.y = state_plus(11); // we do not estimate this... + odomIinM.twist.twist.angular.z = state_plus(12); // we do not estimate this... + + // Velocity covariance (linear then angular) + // TODO: this currently is an approximation since this should actually evolve over our propagation period + // TODO: but to save time we only propagate the mean and not the uncertainty, but maybe we should try to prop the covariance? + // TODO: can we come up with an approx covariance for the omega based on the w_hat = w_m - b_w ?? + statevars.clear(); + statevars.push_back(state->_imu->v()); + Eigen::Matrix covariance_linang = INFINITY * Eigen::Matrix::Identity(); + covariance_linang.block(0, 0, 3, 3) = StateHelper::get_marginal_covariance(_app->get_state(), statevars); + for (int r = 0; r < 6; r++) { + for (int c = 0; c < 6; c++) { + odomIinM.twist.covariance[6 * r + c] = (std::isnan(covariance_linang(r, c))) ? 
0 : covariance_linang(r, c); + } + } + + // Finally, publish the resulting odometry message + pub_odomimu.publish(odomIinM); +} + +void RosVisualizer::visualize_final() { + + // Final time offset value + if (_app->get_state()->_options.do_calib_camera_timeoffset) { + printf(REDPURPLE "camera-imu timeoffset = %.5f\n\n" RESET, _app->get_state()->_calib_dt_CAMtoIMU->value()(0)); + } + + // Final camera intrinsics + if (_app->get_state()->_options.do_calib_camera_intrinsics) { + for (int i = 0; i < _app->get_state()->_options.num_cameras; i++) { + std::shared_ptr calib = _app->get_state()->_cam_intrinsics.at(i); + printf(REDPURPLE "cam%d intrinsics:\n" RESET, (int)i); + printf(REDPURPLE "%.3f,%.3f,%.3f,%.3f\n" RESET, calib->value()(0), calib->value()(1), calib->value()(2), calib->value()(3)); + printf(REDPURPLE "%.5f,%.5f,%.5f,%.5f\n\n" RESET, calib->value()(4), calib->value()(5), calib->value()(6), calib->value()(7)); + } + } + + // Final camera extrinsics + if (_app->get_state()->_options.do_calib_camera_pose) { + for (int i = 0; i < _app->get_state()->_options.num_cameras; i++) { + std::shared_ptr calib = _app->get_state()->_calib_IMUtoCAM.at(i); + Eigen::Matrix4d T_CtoI = Eigen::Matrix4d::Identity(); + T_CtoI.block(0, 0, 3, 3) = quat_2_Rot(calib->quat()).transpose(); + T_CtoI.block(0, 3, 3, 1) = -T_CtoI.block(0, 0, 3, 3) * calib->pos(); + printf(REDPURPLE "T_C%dtoI:\n" RESET, i); + printf(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(0, 0), T_CtoI(0, 1), T_CtoI(0, 2), T_CtoI(0, 3)); + printf(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(1, 0), T_CtoI(1, 1), T_CtoI(1, 2), T_CtoI(1, 3)); + printf(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(2, 0), T_CtoI(2, 1), T_CtoI(2, 2), T_CtoI(2, 3)); + printf(REDPURPLE "%.3f,%.3f,%.3f,%.3f\n\n" RESET, T_CtoI(3, 0), T_CtoI(3, 1), T_CtoI(3, 2), T_CtoI(3, 3)); + } + } + + // Publish RMSE if we have it + if (!gt_states.empty()) { + printf(REDPURPLE "RMSE average: %.3f (deg) orientation\n" RESET, summed_rmse_ori / summed_number); + printf(REDPURPLE "RMSE average: %.3f (m) position\n\n" RESET, summed_rmse_pos / summed_number); + } + + // Publish RMSE and NEES if doing simulation + if (_sim != nullptr) { + printf(REDPURPLE "RMSE average: %.3f (deg) orientation\n" RESET, summed_rmse_ori / summed_number); + printf(REDPURPLE "RMSE average: %.3f (m) position\n\n" RESET, summed_rmse_pos / summed_number); + printf(REDPURPLE "NEES average: %.3f (deg) orientation\n" RESET, summed_nees_ori / summed_number); + printf(REDPURPLE "NEES average: %.3f (m) position\n\n" RESET, summed_nees_pos / summed_number); + } + + // Print the total time + rT2 = boost::posix_time::microsec_clock::local_time(); + printf(REDPURPLE "TIME: %.3f seconds\n\n" RESET, (rT2 - rT1).total_microseconds() * 1e-6); +} + +void RosVisualizer::publish_state() { + + // Get the current state + std::shared_ptr state = _app->get_state(); + + // We want to publish in the IMU clock frame + // The timestamp in the state will be the last camera time + double t_ItoC = state->_calib_dt_CAMtoIMU->value()(0); + double timestamp_inI = state->_timestamp + t_ItoC; + + // Create pose of IMU (note we use the bag time) + geometry_msgs::PoseWithCovarianceStamped poseIinM; + poseIinM.header.stamp = ros::Time(timestamp_inI); + poseIinM.header.seq = poses_seq_imu; + poseIinM.header.frame_id = "world"; + poseIinM.pose.pose.orientation.x = state->_imu->quat()(0); + poseIinM.pose.pose.orientation.y = state->_imu->quat()(1); + poseIinM.pose.pose.orientation.z = state->_imu->quat()(2); + poseIinM.pose.pose.orientation.w 
= state->_imu->quat()(3); + poseIinM.pose.pose.position.x = state->_imu->pos()(0); + poseIinM.pose.pose.position.y = state->_imu->pos()(1); + poseIinM.pose.pose.position.z = state->_imu->pos()(2); + + // Finally set the covariance in the message (in the order position then orientation as per ros convention) + std::vector> statevars; + statevars.push_back(state->_imu->pose()->p()); + statevars.push_back(state->_imu->pose()->q()); + Eigen::Matrix covariance_posori = StateHelper::get_marginal_covariance(_app->get_state(), statevars); + for (int r = 0; r < 6; r++) { + for (int c = 0; c < 6; c++) { + poseIinM.pose.covariance[6 * r + c] = covariance_posori(r, c); + } + } + pub_poseimu.publish(poseIinM); + + //========================================================= + //========================================================= + + // Append to our pose vector + geometry_msgs::PoseStamped posetemp; + posetemp.header = poseIinM.header; + posetemp.pose = poseIinM.pose.pose; + poses_imu.push_back(posetemp); + + // Create our path (imu) + // NOTE: We downsample the number of poses as needed to prevent rviz crashes + // NOTE: https://github.com/ros-visualization/rviz/issues/1107 + nav_msgs::Path arrIMU; + arrIMU.header.stamp = ros::Time::now(); + arrIMU.header.seq = poses_seq_imu; + arrIMU.header.frame_id = "world"; + for (size_t i = 0; i < poses_imu.size(); i += std::floor(poses_imu.size() / 16384.0) + 1) { + arrIMU.poses.push_back(poses_imu.at(i)); + } + pub_pathimu.publish(arrIMU); + + // Move them forward in time + poses_seq_imu++; + + // Publish our transform on TF + // NOTE: since we use JPL we have an implicit conversion to Hamilton when we publish + // NOTE: a rotation from GtoI in JPL has the same xyzw as a ItoG Hamilton rotation + tf::StampedTransform trans; + trans.stamp_ = ros::Time::now(); + trans.frame_id_ = "world"; + trans.child_frame_id_ = "imu"; + tf::Quaternion quat(state->_imu->quat()(0), state->_imu->quat()(1), state->_imu->quat()(2), state->_imu->quat()(3)); + trans.setRotation(quat); + tf::Vector3 orig(state->_imu->pos()(0), state->_imu->pos()(1), state->_imu->pos()(2)); + trans.setOrigin(orig); + if (publish_global2imu_tf) { + mTfBr->sendTransform(trans); + } + + // Loop through each camera calibration and publish it + for (const auto &calib : state->_calib_IMUtoCAM) { + // need to flip the transform to the IMU frame + Eigen::Vector4d q_ItoC = calib.second->quat(); + Eigen::Vector3d p_CinI = -calib.second->Rot().transpose() * calib.second->pos(); + // publish our transform on TF + // NOTE: since we use JPL we have an implicit conversion to Hamilton when we publish + // NOTE: a rotation from ItoC in JPL has the same xyzw as a CtoI Hamilton rotation + tf::StampedTransform trans; + trans.stamp_ = ros::Time::now(); + trans.frame_id_ = "imu"; + trans.child_frame_id_ = "cam" + std::to_string(calib.first); + tf::Quaternion quat(q_ItoC(0), q_ItoC(1), q_ItoC(2), q_ItoC(3)); + trans.setRotation(quat); + tf::Vector3 orig(p_CinI(0), p_CinI(1), p_CinI(2)); + trans.setOrigin(orig); + if (publish_calibration_tf) { + mTfBr->sendTransform(trans); + } + } +} + +void RosVisualizer::publish_images() { + + // Check if we have subscribers + if (pub_tracks.getNumSubscribers() == 0) + return; + + // Get our image of history tracks + cv::Mat img_history = _app->get_historical_viz_image(); + + // Create our message + std_msgs::Header header; + header.stamp = ros::Time::now(); + sensor_msgs::ImagePtr exl_msg = cv_bridge::CvImage(header, "bgr8", img_history).toImageMsg(); + + // Publish + 
pub_tracks.publish(exl_msg); +} + +void RosVisualizer::publish_features() { + + // Check if we have subscribers + if (pub_points_msckf.getNumSubscribers() == 0 && pub_points_slam.getNumSubscribers() == 0 && pub_points_aruco.getNumSubscribers() == 0 && + pub_points_sim.getNumSubscribers() == 0) + return; + + // Get our good features + std::vector feats_msckf = _app->get_good_features_MSCKF(); + + // Declare message and sizes + sensor_msgs::PointCloud2 cloud; + cloud.header.frame_id = "world"; + cloud.header.stamp = ros::Time::now(); + cloud.width = 3 * feats_msckf.size(); + cloud.height = 1; + cloud.is_bigendian = false; + cloud.is_dense = false; // there may be invalid points + + // Setup pointcloud fields + sensor_msgs::PointCloud2Modifier modifier(cloud); + modifier.setPointCloud2FieldsByString(1, "xyz"); + modifier.resize(3 * feats_msckf.size()); + + // Iterators + sensor_msgs::PointCloud2Iterator out_x(cloud, "x"); + sensor_msgs::PointCloud2Iterator out_y(cloud, "y"); + sensor_msgs::PointCloud2Iterator out_z(cloud, "z"); + + // Fill our iterators + for (const auto &pt : feats_msckf) { + *out_x = pt(0); + ++out_x; + *out_y = pt(1); + ++out_y; + *out_z = pt(2); + ++out_z; + } + + // Publish + pub_points_msckf.publish(cloud); + + //==================================================================== + //==================================================================== + + // Get our good features + std::vector feats_slam = _app->get_features_SLAM(); + + // Declare message and sizes + sensor_msgs::PointCloud2 cloud_SLAM; + cloud_SLAM.header.frame_id = "world"; + cloud_SLAM.header.stamp = ros::Time::now(); + cloud_SLAM.width = 3 * feats_slam.size(); + cloud_SLAM.height = 1; + cloud_SLAM.is_bigendian = false; + cloud_SLAM.is_dense = false; // there may be invalid points + + // Setup pointcloud fields + sensor_msgs::PointCloud2Modifier modifier_SLAM(cloud_SLAM); + modifier_SLAM.setPointCloud2FieldsByString(1, "xyz"); + modifier_SLAM.resize(3 * feats_slam.size()); + + // Iterators + sensor_msgs::PointCloud2Iterator out_x_SLAM(cloud_SLAM, "x"); + sensor_msgs::PointCloud2Iterator out_y_SLAM(cloud_SLAM, "y"); + sensor_msgs::PointCloud2Iterator out_z_SLAM(cloud_SLAM, "z"); + + // Fill our iterators + for (const auto &pt : feats_slam) { + *out_x_SLAM = pt(0); + ++out_x_SLAM; + *out_y_SLAM = pt(1); + ++out_y_SLAM; + *out_z_SLAM = pt(2); + ++out_z_SLAM; + } + + // Publish + pub_points_slam.publish(cloud_SLAM); + + //==================================================================== + //==================================================================== + + // Get our good features + std::vector feats_aruco = _app->get_features_ARUCO(); + + // Declare message and sizes + sensor_msgs::PointCloud2 cloud_ARUCO; + cloud_ARUCO.header.frame_id = "world"; + cloud_ARUCO.header.stamp = ros::Time::now(); + cloud_ARUCO.width = 3 * feats_aruco.size(); + cloud_ARUCO.height = 1; + cloud_ARUCO.is_bigendian = false; + cloud_ARUCO.is_dense = false; // there may be invalid points + + // Setup pointcloud fields + sensor_msgs::PointCloud2Modifier modifier_ARUCO(cloud_ARUCO); + modifier_ARUCO.setPointCloud2FieldsByString(1, "xyz"); + modifier_ARUCO.resize(3 * feats_aruco.size()); + + // Iterators + sensor_msgs::PointCloud2Iterator out_x_ARUCO(cloud_ARUCO, "x"); + sensor_msgs::PointCloud2Iterator out_y_ARUCO(cloud_ARUCO, "y"); + sensor_msgs::PointCloud2Iterator out_z_ARUCO(cloud_ARUCO, "z"); + + // Fill our iterators + for (const auto &pt : feats_aruco) { + *out_x_ARUCO = pt(0); + ++out_x_ARUCO; + *out_y_ARUCO = 
pt(1); + ++out_y_ARUCO; + *out_z_ARUCO = pt(2); + ++out_z_ARUCO; + } + + // Publish + pub_points_aruco.publish(cloud_ARUCO); + + //==================================================================== + //==================================================================== + + // Skip the rest of we are not doing simulation + if (_sim == nullptr) + return; + + // Get our good features + std::unordered_map feats_sim = _sim->get_map(); + + // Declare message and sizes + sensor_msgs::PointCloud2 cloud_SIM; + cloud_SIM.header.frame_id = "world"; + cloud_SIM.header.stamp = ros::Time::now(); + cloud_SIM.width = 3 * feats_sim.size(); + cloud_SIM.height = 1; + cloud_SIM.is_bigendian = false; + cloud_SIM.is_dense = false; // there may be invalid points + + // Setup pointcloud fields + sensor_msgs::PointCloud2Modifier modifier_SIM(cloud_SIM); + modifier_SIM.setPointCloud2FieldsByString(1, "xyz"); + modifier_SIM.resize(3 * feats_sim.size()); + + // Iterators + sensor_msgs::PointCloud2Iterator out_x_SIM(cloud_SIM, "x"); + sensor_msgs::PointCloud2Iterator out_y_SIM(cloud_SIM, "y"); + sensor_msgs::PointCloud2Iterator out_z_SIM(cloud_SIM, "z"); + + // Fill our iterators + for (const auto &pt : feats_sim) { + *out_x_SIM = pt.second(0); + ++out_x_SIM; + *out_y_SIM = pt.second(1); + ++out_y_SIM; + *out_z_SIM = pt.second(2); + ++out_z_SIM; + } + + // Publish + pub_points_sim.publish(cloud_SIM); +} + +void RosVisualizer::publish_groundtruth() { + + // Our groundtruth state + Eigen::Matrix state_gt; + + // We want to publish in the IMU clock frame + // The timestamp in the state will be the last camera time + double t_ItoC = _app->get_state()->_calib_dt_CAMtoIMU->value()(0); + double timestamp_inI = _app->get_state()->_timestamp + t_ItoC; + + // Check that we have the timestamp in our GT file [time(sec),q_GtoI,p_IinG,v_IinG,b_gyro,b_accel] + if (_sim == nullptr && (gt_states.empty() || !DatasetReader::get_gt_state(timestamp_inI, state_gt, gt_states))) { + return; + } + + // Get the simulated groundtruth + // NOTE: we get the true time in the IMU clock frame + if (_sim != nullptr) { + timestamp_inI = _app->get_state()->_timestamp + _sim->get_true_paramters().calib_camimu_dt; + if (!_sim->get_state(timestamp_inI, state_gt)) + return; + } + + // Get the GT and system state state + Eigen::Matrix state_ekf = _app->get_state()->_imu->value(); + + // Create pose of IMU + geometry_msgs::PoseStamped poseIinM; + poseIinM.header.stamp = ros::Time(timestamp_inI); + poseIinM.header.seq = poses_seq_gt; + poseIinM.header.frame_id = "world"; + poseIinM.pose.orientation.x = state_gt(1, 0); + poseIinM.pose.orientation.y = state_gt(2, 0); + poseIinM.pose.orientation.z = state_gt(3, 0); + poseIinM.pose.orientation.w = state_gt(4, 0); + poseIinM.pose.position.x = state_gt(5, 0); + poseIinM.pose.position.y = state_gt(6, 0); + poseIinM.pose.position.z = state_gt(7, 0); + pub_posegt.publish(poseIinM); + + // Append to our pose vector + poses_gt.push_back(poseIinM); + + // Create our path (imu) + // NOTE: We downsample the number of poses as needed to prevent rviz crashes + // NOTE: https://github.com/ros-visualization/rviz/issues/1107 + nav_msgs::Path arrIMU; + arrIMU.header.stamp = ros::Time::now(); + arrIMU.header.seq = poses_seq_gt; + arrIMU.header.frame_id = "world"; + for (size_t i = 0; i < poses_gt.size(); i += std::floor(poses_gt.size() / 16384.0) + 1) { + arrIMU.poses.push_back(poses_gt.at(i)); + } + pub_pathgt.publish(arrIMU); + + // Move them forward in time + poses_seq_gt++; + + // Publish our transform on TF + 
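+  // NOTE: same implicit JPL-to-Hamilton reading as the estimate above, so the
+  // "truth" frame below lines up with the estimated "imu" frame in rviz
+  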
tf::StampedTransform trans; + trans.stamp_ = ros::Time::now(); + trans.frame_id_ = "world"; + trans.child_frame_id_ = "truth"; + tf::Quaternion quat(state_gt(1, 0), state_gt(2, 0), state_gt(3, 0), state_gt(4, 0)); + trans.setRotation(quat); + tf::Vector3 orig(state_gt(5, 0), state_gt(6, 0), state_gt(7, 0)); + trans.setOrigin(orig); + if (publish_global2imu_tf) { + mTfBr->sendTransform(trans); + } + + //========================================================================== + //========================================================================== + + // Difference between positions + double dx = state_ekf(4, 0) - state_gt(5, 0); + double dy = state_ekf(5, 0) - state_gt(6, 0); + double dz = state_ekf(6, 0) - state_gt(7, 0); + double rmse_pos = std::sqrt(dx * dx + dy * dy + dz * dz); + + // Quaternion error + Eigen::Matrix quat_gt, quat_st, quat_diff; + quat_gt << state_gt(1, 0), state_gt(2, 0), state_gt(3, 0), state_gt(4, 0); + quat_st << state_ekf(0, 0), state_ekf(1, 0), state_ekf(2, 0), state_ekf(3, 0); + quat_diff = quat_multiply(quat_st, Inv(quat_gt)); + double rmse_ori = (180 / M_PI) * 2 * quat_diff.block(0, 0, 3, 1).norm(); + + //========================================================================== + //========================================================================== + + // Get covariance of pose + std::vector> statevars; + statevars.push_back(_app->get_state()->_imu->q()); + statevars.push_back(_app->get_state()->_imu->p()); + Eigen::Matrix covariance = StateHelper::get_marginal_covariance(_app->get_state(), statevars); + + // Calculate NEES values + double ori_nees = 2 * quat_diff.block(0, 0, 3, 1).dot(covariance.block(0, 0, 3, 3).inverse() * 2 * quat_diff.block(0, 0, 3, 1)); + Eigen::Vector3d errpos = state_ekf.block(4, 0, 3, 1) - state_gt.block(5, 0, 3, 1); + double pos_nees = errpos.transpose() * covariance.block(3, 3, 3, 3).inverse() * errpos; + + //========================================================================== + //========================================================================== + + // Update our average variables + summed_rmse_ori += rmse_ori; + summed_rmse_pos += rmse_pos; + summed_nees_ori += ori_nees; + summed_nees_pos += pos_nees; + summed_number++; + + // Nice display for the user + printf(REDPURPLE "error to gt => %.3f, %.3f (deg,m) | average error => %.3f, %.3f (deg,m) | called %d times\n" RESET, rmse_ori, rmse_pos, + summed_rmse_ori / summed_number, summed_rmse_pos / summed_number, (int)summed_number); + printf(REDPURPLE "nees => %.1f, %.1f (ori,pos) | average nees = %.1f, %.1f (ori,pos)\n" RESET, ori_nees, pos_nees, + summed_nees_ori / summed_number, summed_nees_pos / summed_number); + + //========================================================================== + //========================================================================== +} + +void RosVisualizer::publish_loopclosure_information() { + + // Get the current tracks in this frame + double active_tracks_time1 = -1; + double active_tracks_time2 = -1; + std::unordered_map active_tracks_posinG; + std::unordered_map active_tracks_uvd; + cv::Mat active_cam0_image; + _app->get_active_tracks(active_tracks_time1, active_tracks_posinG, active_tracks_uvd); + _app->get_active_image(active_tracks_time2, active_cam0_image); + if (active_tracks_time1 == -1) + return; + if (_app->get_state()->_clones_IMU.find(active_tracks_time1) == _app->get_state()->_clones_IMU.end()) + return; + if (active_tracks_time1 != active_tracks_time2) + return; + + // Default header + 
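+  // (stamped with active_tracks_time1 so every loop-closure message published
+  // below refers to the same historical clone)
+  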
+
+void RosVisualizer::publish_loopclosure_information() {
+
+  // Get the current tracks in this frame
+  double active_tracks_time1 = -1;
+  double active_tracks_time2 = -1;
+  std::unordered_map<size_t, Eigen::Vector3d> active_tracks_posinG;
+  std::unordered_map<size_t, Eigen::Vector3d> active_tracks_uvd;
+  cv::Mat active_cam0_image;
+  _app->get_active_tracks(active_tracks_time1, active_tracks_posinG, active_tracks_uvd);
+  _app->get_active_image(active_tracks_time2, active_cam0_image);
+  if (active_tracks_time1 == -1)
+    return;
+  if (_app->get_state()->_clones_IMU.find(active_tracks_time1) == _app->get_state()->_clones_IMU.end())
+    return;
+  if (active_tracks_time1 != active_tracks_time2)
+    return;
+
+  // Default header
+  std_msgs::Header header;
+  header.stamp = ros::Time(active_tracks_time1);
+
+  //======================================================
+  // Check if we have subscribers for the pose odometry, camera intrinsics, or extrinsics
+  if (pub_loop_pose.getNumSubscribers() != 0 || pub_loop_extrinsic.getNumSubscribers() != 0 ||
+      pub_loop_intrinsics.getNumSubscribers() != 0) {
+
+    // PUBLISH HISTORICAL POSE ESTIMATE
+    nav_msgs::Odometry odometry_pose;
+    odometry_pose.header = header;
+    odometry_pose.header.frame_id = "world";
+    odometry_pose.pose.pose.position.x = _app->get_state()->_clones_IMU.at(active_tracks_time1)->pos()(0);
+    odometry_pose.pose.pose.position.y = _app->get_state()->_clones_IMU.at(active_tracks_time1)->pos()(1);
+    odometry_pose.pose.pose.position.z = _app->get_state()->_clones_IMU.at(active_tracks_time1)->pos()(2);
+    odometry_pose.pose.pose.orientation.x = _app->get_state()->_clones_IMU.at(active_tracks_time1)->quat()(0);
+    odometry_pose.pose.pose.orientation.y = _app->get_state()->_clones_IMU.at(active_tracks_time1)->quat()(1);
+    odometry_pose.pose.pose.orientation.z = _app->get_state()->_clones_IMU.at(active_tracks_time1)->quat()(2);
+    odometry_pose.pose.pose.orientation.w = _app->get_state()->_clones_IMU.at(active_tracks_time1)->quat()(3);
+    pub_loop_pose.publish(odometry_pose);
+
+    // PUBLISH IMU TO CAMERA0 EXTRINSIC
+    // need to flip the transform to the IMU frame
+    // (the state stores {R_ItoC, p_IinC} with x_C = R_ItoC * x_I + p_IinC,
+    // so the camera position in the IMU frame is p_CinI = -R_ItoC^T * p_IinC)
+    Eigen::Vector4d q_ItoC = _app->get_state()->_calib_IMUtoCAM.at(0)->quat();
+    Eigen::Vector3d p_CinI = -_app->get_state()->_calib_IMUtoCAM.at(0)->Rot().transpose() * _app->get_state()->_calib_IMUtoCAM.at(0)->pos();
+    nav_msgs::Odometry odometry_calib;
+    odometry_calib.header = header;
+    odometry_calib.header.frame_id = "imu";
+    odometry_calib.pose.pose.position.x = p_CinI(0);
+    odometry_calib.pose.pose.position.y = p_CinI(1);
+    odometry_calib.pose.pose.position.z = p_CinI(2);
+    odometry_calib.pose.pose.orientation.x = q_ItoC(0);
+    odometry_calib.pose.pose.orientation.y = q_ItoC(1);
+    odometry_calib.pose.pose.orientation.z = q_ItoC(2);
+    odometry_calib.pose.pose.orientation.w = q_ItoC(3);
+    pub_loop_extrinsic.publish(odometry_calib);
+
+    // PUBLISH CAMERA0 INTRINSICS
+    sensor_msgs::CameraInfo cameraparams;
+    cameraparams.header = header;
+    cameraparams.header.frame_id = "imu";
+    cameraparams.distortion_model = (_app->get_state()->_cam_intrinsics_model.at(0)) ? "equidistant" : "plumb_bob";
+    Eigen::VectorXd cparams = _app->get_state()->_cam_intrinsics.at(0)->value();
+    cameraparams.D = {cparams(4), cparams(5), cparams(6), cparams(7)};
+    cameraparams.K = {cparams(0), 0, cparams(2), 0, cparams(1), cparams(3), 0, 0, 1};
+    pub_loop_intrinsics.publish(cameraparams);
+  }
+
+  //======================================================
+  // PUBLISH FEATURE TRACKS IN THE GLOBAL FRAME OF REFERENCE
+  if (pub_loop_point.getNumSubscribers() != 0) {
+
+    // Construct the message
+    sensor_msgs::PointCloud point_cloud;
+    point_cloud.header = header;
+    point_cloud.header.frame_id = "world";
+    for (const auto &feattimes : active_tracks_posinG) {
+
+      // Get this feature information
+      size_t featid = feattimes.first;
+      Eigen::Vector3d uvd = Eigen::Vector3d::Zero();
+      if (active_tracks_uvd.find(featid) != active_tracks_uvd.end()) {
+        uvd = active_tracks_uvd.at(featid);
+      }
+      Eigen::Vector3d pFinG = active_tracks_posinG.at(featid);
+
+      // Push back 3d point
+      geometry_msgs::Point32 p;
+      p.x = pFinG(0);
+      p.y = pFinG(1);
+      p.z = pFinG(2);
+      point_cloud.points.push_back(p);
+
+      // Push back the uv_norm, uv_raw, and feature id
+      // (five floats per feature: [u_norm, v_norm, u_raw, v_raw, id])
+      // NOTE: we don't use the normalized coordinates to save time here
+      // NOTE: they will have to be re-normalized in the loop closure code
+      sensor_msgs::ChannelFloat32 p_2d;
+      p_2d.values.push_back(0);
+      p_2d.values.push_back(0);
+      p_2d.values.push_back(uvd(0));
+      p_2d.values.push_back(uvd(1));
+      p_2d.values.push_back(featid);
+      point_cloud.channels.push_back(p_2d);
+    }
+    pub_loop_point.publish(point_cloud);
+  }
+
+  //======================================================
+  // Depth images of sparse points and its colorized version
+  if (pub_loop_img_depth.getNumSubscribers() != 0 || pub_loop_img_depth_color.getNumSubscribers() != 0) {
+
+    // Create the images we will populate with the depths
+    std::pair<int, int> wh_pair = {active_cam0_image.cols, active_cam0_image.rows};
+    cv::Mat depthmap_viz;
+    cv::cvtColor(active_cam0_image, depthmap_viz, cv::COLOR_GRAY2RGB);
+    // 16-bit depth image in millimeters (hence the x1000 scaling below)
+    cv::Mat depthmap = cv::Mat::zeros(wh_pair.second, wh_pair.first, CV_16UC1);
+
+    // Loop through all points and append
+    for (const auto &feattimes : active_tracks_uvd) {
+
+      // Get this feature information
+      size_t featid = feattimes.first;
+      Eigen::Vector3d uvd = active_tracks_uvd.at(featid);
+
+      // Skip invalid points
+      double dw = 3;
+      if (uvd(0) < dw || uvd(0) > wh_pair.first - dw || uvd(1) < dw || uvd(1) > wh_pair.second - dw) {
+        continue;
+      }
+
+      // Append the depth
+      // NOTE: scaled by 1000 to fit the 16U
+      // NOTE: access order is y,x (stupid opencv convention stuff)
+      depthmap.at<uint16_t>((int)uvd(1), (int)uvd(0)) = (uint16_t)(1000 * uvd(2));
+
+      // Taken from LSD-SLAM codebase segment into 0-4 meter segments:
+      // https://github.com/tum-vision/lsd_slam/blob/d1e6f0e1a027889985d2e6b4c0fe7a90b0c75067/lsd_slam_core/src/util/globalFuncs.cpp#L87-L96
+      float id = 1.0f / (float)uvd(2);
+      float r = (0.0f - id) * 255 / 1.0f;
+      if (r < 0)
+        r = -r;
+      float g = (1.0f - id) * 255 / 1.0f;
+      if (g < 0)
+        g = -g;
+      float b = (2.0f - id) * 255 / 1.0f;
+      if (b < 0)
+        b = -b;
+      uchar rc = r < 0 ? 0 : (r > 255 ? 255 : r);
+      uchar gc = g < 0 ? 0 : (g > 255 ? 255 : g);
+      uchar bc = b < 0 ? 0 : (b > 255 ? 255 : b);
+      cv::Scalar color(255 - rc, 255 - gc, 255 - bc);
+
+      // Small square around the point (note the above bound check needs to take into account this width)
+      cv::Point p0(uvd(0) - dw, uvd(1) - dw);
+      cv::Point p1(uvd(0) + dw, uvd(1) + dw);
+      cv::rectangle(depthmap_viz, p0, p1, color, -1);
+    }
+
+    // Create our messages
+    sensor_msgs::ImagePtr exl_msg1 = cv_bridge::CvImage(header, sensor_msgs::image_encodings::TYPE_16UC1, depthmap).toImageMsg();
+    pub_loop_img_depth.publish(exl_msg1);
+    sensor_msgs::ImagePtr exl_msg2 = cv_bridge::CvImage(header, "bgr8", depthmap_viz).toImageMsg();
+    pub_loop_img_depth_color.publish(exl_msg2);
+  }
+}
+
+void RosVisualizer::sim_save_total_state_to_file() {
+
+  // Get current state
+  std::shared_ptr<State> state = _app->get_state();
+
+  // We want to publish in the IMU clock frame
+  // The timestamp in the state will be the last camera time
+  double t_ItoC = state->_calib_dt_CAMtoIMU->value()(0);
+  double timestamp_inI = state->_timestamp + t_ItoC;
+
+  // If we have our simulator, then save it to our groundtruth file
+  if (_sim != nullptr) {
+
+    // Note that we get the true time in the IMU clock frame
+    // NOTE: we record both the estimate and groundtruth with the same "true" timestamp if we are doing simulation
+    Eigen::Matrix<double, 17, 1> state_gt;
+    timestamp_inI = state->_timestamp + _sim->get_true_paramters().calib_camimu_dt;
+    if (_sim->get_state(timestamp_inI, state_gt)) {
+      // STATE: write current true state
+      of_state_gt.precision(5);
+      of_state_gt.setf(std::ios::fixed, std::ios::floatfield);
+      of_state_gt << state_gt(0) << " ";
+      of_state_gt.precision(6);
+      of_state_gt << state_gt(1) << " " << state_gt(2) << " " << state_gt(3) << " " << state_gt(4) << " ";
+      of_state_gt << state_gt(5) << " " << state_gt(6) << " " << state_gt(7) << " ";
+      of_state_gt << state_gt(8) << " " << state_gt(9) << " " << state_gt(10) << " ";
+      of_state_gt << state_gt(11) << " " << state_gt(12) << " " << state_gt(13) << " ";
+      of_state_gt << state_gt(14) << " " << state_gt(15) << " " << state_gt(16) << " ";
+
+      // TIMEOFF: Get the current true time offset
+      of_state_gt.precision(7);
+      of_state_gt << _sim->get_true_paramters().calib_camimu_dt << " ";
+      of_state_gt.precision(0);
+      of_state_gt << state->_options.num_cameras << " ";
+      of_state_gt.precision(6);
+
+      // CALIBRATION: Write the camera values to file
+      assert(state->_options.num_cameras == _sim->get_true_paramters().state_options.num_cameras);
+      for (int i = 0; i < state->_options.num_cameras; i++) {
+        // Intrinsics values
+        of_state_gt << _sim->get_true_paramters().camera_intrinsics.at(i)(0) << " " << _sim->get_true_paramters().camera_intrinsics.at(i)(1)
+                    << " " << _sim->get_true_paramters().camera_intrinsics.at(i)(2) << " "
+                    << _sim->get_true_paramters().camera_intrinsics.at(i)(3) << " ";
+        of_state_gt << _sim->get_true_paramters().camera_intrinsics.at(i)(4) << " " << _sim->get_true_paramters().camera_intrinsics.at(i)(5)
+                    << " " << _sim->get_true_paramters().camera_intrinsics.at(i)(6) << " "
+                    << _sim->get_true_paramters().camera_intrinsics.at(i)(7) << " ";
+        // Rotation and position
+        of_state_gt << _sim->get_true_paramters().camera_extrinsics.at(i)(0) << " " << _sim->get_true_paramters().camera_extrinsics.at(i)(1)
+                    << " " << _sim->get_true_paramters().camera_extrinsics.at(i)(2) << " "
+                    << _sim->get_true_paramters().camera_extrinsics.at(i)(3) << " ";
+        of_state_gt << _sim->get_true_paramters().camera_extrinsics.at(i)(4) << " " << _sim->get_true_paramters().camera_extrinsics.at(i)(5)
+                    << " " << _sim->get_true_paramters().camera_extrinsics.at(i)(6) << " ";
+      }
+
+      // New line
+      of_state_gt << endl;
+    }
+  }
+
+  //==========================================================================
+  //==========================================================================
+  //==========================================================================
+
+  // Get the covariance of the whole system
+  Eigen::MatrixXd cov = StateHelper::get_full_covariance(state);
+
+  // STATE: Write the current state to file
+  of_state_est.precision(5);
+  of_state_est.setf(std::ios::fixed, std::ios::floatfield);
+  of_state_est << timestamp_inI << " ";
+  of_state_est.precision(6);
+  of_state_est << state->_imu->quat()(0) << " " << state->_imu->quat()(1) << " " << state->_imu->quat()(2) << " " << state->_imu->quat()(3)
+               << " ";
+  of_state_est << state->_imu->pos()(0) << " " << state->_imu->pos()(1) << " " << state->_imu->pos()(2) << " ";
+  of_state_est << state->_imu->vel()(0) << " " << state->_imu->vel()(1) << " " << state->_imu->vel()(2) << " ";
+  of_state_est << state->_imu->bias_g()(0) << " " << state->_imu->bias_g()(1) << " " << state->_imu->bias_g()(2) << " ";
+  of_state_est << state->_imu->bias_a()(0) << " " << state->_imu->bias_a()(1) << " " << state->_imu->bias_a()(2) << " ";
+
+  // STATE: Write current uncertainty to file
+  of_state_std.precision(5);
+  of_state_std.setf(std::ios::fixed, std::ios::floatfield);
+  of_state_std << timestamp_inI << " ";
+  of_state_std.precision(6);
+  int id = state->_imu->q()->id();
+  of_state_std << std::sqrt(cov(id + 0, id + 0)) << " " << std::sqrt(cov(id + 1, id + 1)) << " " << std::sqrt(cov(id + 2, id + 2)) << " ";
+  id = state->_imu->p()->id();
+  of_state_std << std::sqrt(cov(id + 0, id + 0)) << " " << std::sqrt(cov(id + 1, id + 1)) << " " << std::sqrt(cov(id + 2, id + 2)) << " ";
+  id = state->_imu->v()->id();
+  of_state_std << std::sqrt(cov(id + 0, id + 0)) << " " << std::sqrt(cov(id + 1, id + 1)) << " " << std::sqrt(cov(id + 2, id + 2)) << " ";
+  id = state->_imu->bg()->id();
+  of_state_std << std::sqrt(cov(id + 0, id + 0)) << " " << std::sqrt(cov(id + 1, id + 1)) << " " << std::sqrt(cov(id + 2, id + 2)) << " ";
+  id = state->_imu->ba()->id();
+  of_state_std << std::sqrt(cov(id + 0, id + 0)) << " " << std::sqrt(cov(id + 1, id + 1)) << " " << std::sqrt(cov(id + 2, id + 2)) << " ";
+
+  // TIMEOFF: Get the current estimate time offset
+  of_state_est.precision(7);
+  of_state_est << state->_calib_dt_CAMtoIMU->value()(0) << " ";
+  of_state_est.precision(0);
+  of_state_est << state->_options.num_cameras << " ";
+  of_state_est.precision(6);
+
+  // TIMEOFF: Get the current std values
+  if (state->_options.do_calib_camera_timeoffset) {
+    of_state_std << std::sqrt(cov(state->_calib_dt_CAMtoIMU->id(), state->_calib_dt_CAMtoIMU->id())) << " ";
+  } else {
+    of_state_std << 0.0 << " ";
+  }
+  of_state_std.precision(0);
+  of_state_std << state->_options.num_cameras << " ";
+  of_state_std.precision(6);
+
+  // CALIBRATION: Write the camera values to file
+  for (int i = 0; i < state->_options.num_cameras; i++) {
+    // Intrinsics values
+    of_state_est << state->_cam_intrinsics.at(i)->value()(0) << " " << state->_cam_intrinsics.at(i)->value()(1) << " "
+                 << state->_cam_intrinsics.at(i)->value()(2) << " " << state->_cam_intrinsics.at(i)->value()(3) << " ";
+    of_state_est << state->_cam_intrinsics.at(i)->value()(4) << " " << state->_cam_intrinsics.at(i)->value()(5) << " "
+                 << state->_cam_intrinsics.at(i)->value()(6) << " " << state->_cam_intrinsics.at(i)->value()(7) << " ";
+    // Rotation and position
+    of_state_est << state->_calib_IMUtoCAM.at(i)->value()(0) << " " << state->_calib_IMUtoCAM.at(i)->value()(1) << " "
+                 << state->_calib_IMUtoCAM.at(i)->value()(2) << " " << state->_calib_IMUtoCAM.at(i)->value()(3) << " ";
+    of_state_est << state->_calib_IMUtoCAM.at(i)->value()(4) << " " << state->_calib_IMUtoCAM.at(i)->value()(5) << " "
+                 << state->_calib_IMUtoCAM.at(i)->value()(6) << " ";
+    // Covariance
+    if (state->_options.do_calib_camera_intrinsics) {
+      int index_in = state->_cam_intrinsics.at(i)->id();
+      of_state_std << std::sqrt(cov(index_in + 0, index_in + 0)) << " " << std::sqrt(cov(index_in + 1, index_in + 1)) << " "
+                   << std::sqrt(cov(index_in + 2, index_in + 2)) << " " << std::sqrt(cov(index_in + 3, index_in + 3)) << " ";
+      of_state_std << std::sqrt(cov(index_in + 4, index_in + 4)) << " " << std::sqrt(cov(index_in + 5, index_in + 5)) << " "
+                   << std::sqrt(cov(index_in + 6, index_in + 6)) << " " << std::sqrt(cov(index_in + 7, index_in + 7)) << " ";
+    } else {
+      of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " " << 0.0 << " ";
+      of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " " << 0.0 << " ";
+    }
+    if (state->_options.do_calib_camera_pose) {
+      int index_ex = state->_calib_IMUtoCAM.at(i)->id();
+      of_state_std << std::sqrt(cov(index_ex + 0, index_ex + 0)) << " " << std::sqrt(cov(index_ex + 1, index_ex + 1)) << " "
+                   << std::sqrt(cov(index_ex + 2, index_ex + 2)) << " ";
+      of_state_std << std::sqrt(cov(index_ex + 3, index_ex + 3)) << " " << std::sqrt(cov(index_ex + 4, index_ex + 4)) << " "
+                   << std::sqrt(cov(index_ex + 5, index_ex + 5)) << " ";
+    } else {
+      of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " ";
+      of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " ";
+    }
+  }
+
+  // Done with the estimates!
+  of_state_est << endl;
+  of_state_std << endl;
+}
diff --git a/ov_msckf/src/fisheye_mask_752x480.jpg b/ov_msckf/src/fisheye_mask_752x480.jpg
new file mode 100644
index 000000000..5caf51f55
Binary files /dev/null and b/ov_msckf/src/fisheye_mask_752x480.jpg differ
diff --git a/ov_msckf/src/ros/ROS1Visualizer.cpp b/ov_msckf/src/ros/ROS1Visualizer.cpp
index 6e1cf127d..c9b8003f3 100644
--- a/ov_msckf/src/ros/ROS1Visualizer.cpp
+++ b/ov_msckf/src/ros/ROS1Visualizer.cpp
@@ -156,7 +156,7 @@ void ROS1Visualizer::setup_subscribers(std::shared_ptr<ov_core::YamlParser> parser) {
   // Create imu subscriber (handle legacy ros param info)
   std::string topic_imu;
   _nh->param<std::string>("topic_imu", topic_imu, "/imu0");
-  parser->parse_external("relative_config_imu", "imu0", "rostopic", topic_imu);
+  /* parser->parse_external("relative_config_imu", "imu0", "rostopic", topic_imu); */
   sub_imu = _nh->subscribe(topic_imu, 1000, &ROS1Visualizer::callback_inertial, this);
   PRINT_INFO("subscribing to IMU: %s\n", topic_imu.c_str());
@@ -167,8 +167,8 @@
     std::string cam_topic0, cam_topic1;
     _nh->param<std::string>("topic_camera" + std::to_string(0), cam_topic0, "/cam" + std::to_string(0) + "/image_raw");
     _nh->param<std::string>("topic_camera" + std::to_string(1), cam_topic1, "/cam" + std::to_string(1) + "/image_raw");
-    parser->parse_external("relative_config_imucam", "cam" + std::to_string(0), "rostopic", cam_topic0);
-    parser->parse_external("relative_config_imucam", "cam" + std::to_string(1), "rostopic", cam_topic1);
+    /* parser->parse_external("relative_config_imucam", "cam" + std::to_string(0), "rostopic", cam_topic0); */
+    /* parser->parse_external("relative_config_imucam", "cam" + std::to_string(1), "rostopic", cam_topic1); */
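+    // NOTE: the rostopic entries in the per-sensor config files are ignored in
+    // this fork; topics come from the topic_imu / topic_cameraN parameters that
+    // the launch files set under the /UAV_NAME/ namespace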
     // Create sync filter (they have unique pointers internally, so we have to use move logic here...)
     auto image_sub0 = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*_nh, cam_topic0, 1);
     auto image_sub1 = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*_nh, cam_topic1, 1);
@@ -186,7 +186,7 @@ void ROS1Visualizer::setup_subscribers(std::shared_ptr<ov_core::YamlParser> parser) {
       // read in the topic
       std::string cam_topic;
       _nh->param<std::string>("topic_camera" + std::to_string(i), cam_topic, "/cam" + std::to_string(i) + "/image_raw");
-      parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "rostopic", cam_topic);
+      /* parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "rostopic", cam_topic); */
       // create subscriber
       subs_cam.push_back(_nh->subscribe<sensor_msgs::Image>(cam_topic, 10, boost::bind(&ROS1Visualizer::callback_monocular, this, _1, i)));
       PRINT_INFO("subscribing to cam (mono): %s\n", cam_topic.c_str());
@@ -332,10 +332,12 @@ void ROS1Visualizer::visualize_odometry(double timestamp) {
     auto odom_pose = std::make_shared<ov_type::PoseJPL>();
     odom_pose->set_value(state_plus.block(0, 0, 7, 1));
     tf::StampedTransform trans = ROSVisualizerHelper::get_stamped_transform_from_pose(odom_pose, false);
-    trans.frame_id_ = "global";
-    trans.child_frame_id_ = "imu";
+    /* trans.frame_id_ = "global"; */
+    /* trans.child_frame_id_ = "imu"; */
+    tf::Transform trans_inv = trans.inverse();
+    tf::StampedTransform tf_temp(trans_inv, trans.stamp_, "imu", "global");
     if (publish_global2imu_tf) {
-      mTfBr->sendTransform(trans);
+      mTfBr->sendTransform(tf_temp);
     }
 
     // Loop through each camera calibration and publish it
diff --git a/ov_msckf/src/ros_subscribe_msckf.cpp b/ov_msckf/src/ros_subscribe_msckf.cpp
new file mode 100644
index 000000000..37f0f2cd6
--- /dev/null
+++ b/ov_msckf/src/ros_subscribe_msckf.cpp
@@ -0,0 +1,204 @@
+/*
+ * OpenVINS: An Open Platform for Visual-Inertial Research
+ * Copyright (C) 2021 Patrick Geneva
+ * Copyright (C) 2021 Guoquan Huang
+ * Copyright (C) 2021 OpenVINS Contributors
+ * Copyright (C) 2019 Kevin Eckenhoff
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <ros/ros.h>
+#include <sensor_msgs/Imu.h>
+#include <sensor_msgs/Image.h>
+#include <sensor_msgs/image_encodings.h>
+#include <cv_bridge/cv_bridge.h>
+#include <message_filters/subscriber.h>
+#include <message_filters/synchronizer.h>
+#include <message_filters/sync_policies/approximate_time.h>
+#include <opencv2/opencv.hpp>
+
+#include <memory>
+
+#include "core/RosVisualizer.h"
+#include "core/VioManager.h"
+#include "core/VioManagerOptions.h"
+#include "utils/dataset_reader.h"
+#include "utils/parse_ros.h"
+#include "utils/sensor_data.h"
+
+using namespace ov_msckf;
+
+std::shared_ptr<VioManager> sys;
+std::shared_ptr<RosVisualizer> viz;
+
+// Callback functions
+void callback_inertial(const sensor_msgs::Imu::ConstPtr &msg);
+void callback_monocular(const sensor_msgs::ImageConstPtr &msg0, int cam_id0, const cv::Mat mask_image);
+void callback_stereo(const sensor_msgs::ImageConstPtr &msg0, const sensor_msgs::ImageConstPtr &msg1, int cam_id0, int cam_id1);

+// Main function
+int main(int argc, char **argv) {
+
+  // Load the fisheye mask as a single-channel grayscale image (flag 0 = cv::IMREAD_GRAYSCALE)
+  // NOTE: the path is hard-coded, so the mask file must exist at this exact location
+  cv::Mat mask_image = cv::imread("/home/mrs/fisheye_mask_752x480.jpg", 0);
+
+  // Launch our ros node
+  ros::init(argc, argv, "run_subscribe_msckf");
+  ros::NodeHandle nh("~");
+
+  // Create our VIO system
+  VioManagerOptions params = parse_ros_nodehandler(nh);
+  sys = std::make_shared<VioManager>(params);
+  viz = std::make_shared<RosVisualizer>(nh, sys);
+
+  //===================================================================================
+  //===================================================================================
+  //===================================================================================
+
+  // Create imu subscriber
+  std::string topic_imu;
+  nh.param<std::string>("topic_imu", topic_imu, "/imu0");
+  ros::Subscriber subimu = nh.subscribe(topic_imu, 9999, callback_inertial);
+
+  // Create camera subscriber data vectors
+  std::vector<int> added_cam_ids;
+  std::vector<ros::Subscriber> subs_cam;
+  typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image> sync_pol;
+  std::vector<std::unique_ptr<message_filters::Synchronizer<sync_pol>>> sync_cam;
+  std::vector<std::unique_ptr<message_filters::Subscriber<sensor_msgs::Image>>> sync_subs_cam;
+
+  // Logic for sync stereo subscriber
+  // https://answers.ros.org/question/96346/subscribe-to-two-image_raws-with-one-function/?answer=96491#post-id-96491
+  for (const auto &pair : params.stereo_pairs) {
+    // Read in the topics
+    std::string cam_topic0, cam_topic1;
+    nh.param<std::string>("topic_camera" + std::to_string(pair.first), cam_topic0, "/cam" + std::to_string(pair.first) + "/image_raw");
+    nh.param<std::string>("topic_camera" + std::to_string(pair.second), cam_topic1, "/cam" + std::to_string(pair.second) + "/image_raw");
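+    // ApproximateTime pairs left/right frames whose stamps are close but not
+    // identical (queue depth 5); an exact-time policy would drop pairs from
+    // cameras that are not hardware-triggered together.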
+    // Create sync filter (they have unique pointers internally, so we have to use move logic here...)
+    auto image_sub0 = std::unique_ptr<message_filters::Subscriber<sensor_msgs::Image>>(
+        new message_filters::Subscriber<sensor_msgs::Image>(nh, cam_topic0, 5));
+    auto image_sub1 = std::unique_ptr<message_filters::Subscriber<sensor_msgs::Image>>(
+        new message_filters::Subscriber<sensor_msgs::Image>(nh, cam_topic1, 5));
+    auto sync = std::unique_ptr<message_filters::Synchronizer<sync_pol>>(
+        new message_filters::Synchronizer<sync_pol>(sync_pol(5), *image_sub0, *image_sub1));
+    sync->registerCallback(boost::bind(&callback_stereo, _1, _2, pair.first, pair.second));
+    // Append to our vector of subscribers
+    added_cam_ids.push_back(pair.first);
+    added_cam_ids.push_back(pair.second);
+    sync_cam.push_back(std::move(sync));
+    sync_subs_cam.push_back(std::move(image_sub0));
+    sync_subs_cam.push_back(std::move(image_sub1));
+    ROS_INFO("subscribing to cam (stereo): %s", cam_topic0.c_str());
+    ROS_INFO("subscribing to cam (stereo): %s", cam_topic1.c_str());
+  }
+
+  // Now we should add any non-stereo callbacks here
+  for (int i = 0; i < params.state_options.num_cameras; i++) {
+    // Skip if already have been added
+    if (std::find(added_cam_ids.begin(), added_cam_ids.end(), i) != added_cam_ids.end())
+      continue;
+    // read in the topic
+    std::string cam_topic;
+    nh.param<std::string>("topic_camera" + std::to_string(i), cam_topic, "/cam" + std::to_string(i) + "/image_raw");
+    // create subscriber
+    subs_cam.push_back(nh.subscribe<sensor_msgs::Image>(cam_topic, 5, boost::bind(callback_monocular, _1, i, mask_image)));
+    ROS_INFO("subscribing to cam (mono): %s", cam_topic.c_str());
+  }
+
+  //===================================================================================
+  //===================================================================================
+  //===================================================================================
+
+  // Spin off to ROS
+  // TODO: maybe should use multi-thread spinner
+  // TODO: but need to support multi-threaded calls to viomanager
+  ROS_INFO("done...spinning to ros");
+  ros::spin();
+
+  // Final visualization
+  viz->visualize_final();
+
+  // Done!
+  return EXIT_SUCCESS;
+}
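+
+// NOTE: with the single-threaded ros::spin() above, the three callbacks below
+// never run concurrently; callback_inertial also drives the visualization, so
+// any time spent in viz->visualize() delays IMU ingestion (see the
+// multi-thread TODOs in main()).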
+
+void callback_inertial(const sensor_msgs::Imu::ConstPtr &msg) {
+
+  // convert into correct format
+  ov_core::ImuData message;
+  message.timestamp = msg->header.stamp.toSec();
+  message.wm << msg->angular_velocity.x, msg->angular_velocity.y, msg->angular_velocity.z;
+  message.am << msg->linear_acceleration.x, msg->linear_acceleration.y, msg->linear_acceleration.z;
+
+  // send it to our VIO system
+  sys->feed_measurement_imu(message);
+  viz->visualize();
+  viz->visualize_odometry(message.timestamp);
+}
+
+void callback_monocular(const sensor_msgs::ImageConstPtr &msg0, int cam_id0, const cv::Mat mask_image) {
+
+  // Get the image
+  cv_bridge::CvImageConstPtr cv_ptr;
+  try {
+    cv_ptr = cv_bridge::toCvShare(msg0, sensor_msgs::image_encodings::MONO8);
+  } catch (cv_bridge::Exception &e) {
+    ROS_ERROR("cv_bridge exception: %s", e.what());
+    return;
+  }
+
+  // Create the measurement
+  ov_core::CameraData message;
+  message.timestamp = cv_ptr->header.stamp.toSec();
+  message.sensor_ids.push_back(cam_id0);
+  // Apply the fisheye mask: copyTo with a mask keeps only pixels where the mask
+  // is non-zero, so the blacked-out border yields no feature detections
+  cv::Mat masked_image;
+  cv_ptr->image.copyTo(masked_image, mask_image);
+  message.images.push_back(masked_image.clone());
+
+  // send it to our VIO system
+  sys->feed_measurement_camera(message);
+}
+
+void callback_stereo(const sensor_msgs::ImageConstPtr &msg0, const sensor_msgs::ImageConstPtr &msg1, int cam_id0, int cam_id1) {
+
+  // Get the image
+  cv_bridge::CvImageConstPtr cv_ptr0;
+  try {
+    cv_ptr0 = cv_bridge::toCvShare(msg0, sensor_msgs::image_encodings::MONO8);
+  } catch (cv_bridge::Exception &e) {
+    ROS_ERROR("cv_bridge exception: %s", e.what());
+    return;
+  }
+
+  // Get the image
+  cv_bridge::CvImageConstPtr cv_ptr1;
+  try {
+    cv_ptr1 = cv_bridge::toCvShare(msg1, sensor_msgs::image_encodings::MONO8);
+  } catch (cv_bridge::Exception &e) {
+    ROS_ERROR("cv_bridge exception: %s", e.what());
+    return;
+  }
+
+  // Create the measurement
+  ov_core::CameraData message;
+  message.timestamp = cv_ptr0->header.stamp.toSec();
+  message.sensor_ids.push_back(cam_id0);
+  message.sensor_ids.push_back(cam_id1);
+  message.images.push_back(cv_ptr0->image.clone());
+  message.images.push_back(cv_ptr1->image.clone());
+
+  // send it to our VIO system
+  sys->feed_measurement_camera(message);
+}
diff --git a/ov_msckf/src/state/State.h b/ov_msckf/src/state/State.h
index f83dee5ce..80b755b71 100644
--- a/ov_msckf/src/state/State.h
+++ b/ov_msckf/src/state/State.h
@@ -194,4 +194,4 @@ class State {
 
 } // namespace ov_msckf
 
-#endif // OV_MSCKF_STATE_H
\ No newline at end of file
+#endif // OV_MSCKF_STATE_H
diff --git a/ov_msckf/src/state/StateHelper.cpp b/ov_msckf/src/state/StateHelper.cpp
index 0c89c5610..4bc955f1b 100644
--- a/ov_msckf/src/state/StateHelper.cpp
+++ b/ov_msckf/src/state/StateHelper.cpp
@@ -642,4 +642,4 @@ void StateHelper::marginalize_slam(std::shared_ptr<State> state) {
     it0++;
   }
 }
-}
\ No newline at end of file
+}
diff --git a/ov_repub/CMakeLists.txt b/ov_repub/CMakeLists.txt
new file mode 100644
index 000000000..411688a2c
--- /dev/null
+++ b/ov_repub/CMakeLists.txt
@@ -0,0 +1,205 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(ov_repub)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED COMPONENTS
+  roscpp
+  rospy
+)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
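+# NOTE: everything below is the stock catkin template with all targets left
+# commented out; ov_repub builds no code of its own and appears to exist only
+# to carry the launch files added in this patch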
+
+## Uncomment this if the package has a setup.py. This macro ensures
+## modules and global scripts declared therein get installed
+## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
+# catkin_python_setup()
+
+################################################
+## Declare ROS messages, services and actions ##
+################################################
+
+## To declare and build messages, services or actions from within this
+## package, follow these steps:
+## * Let MSG_DEP_SET be the set of packages whose message types you use in
+##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
+## * In the file package.xml:
+##   * add a build_depend tag for "message_generation"
+##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
+##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
+##     but can be declared for certainty nonetheless:
+##     * add a exec_depend tag for "message_runtime"
+## * In this file (CMakeLists.txt):
+##   * add "message_generation" and every package in MSG_DEP_SET to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * add "message_runtime" and every package in MSG_DEP_SET to
+##     catkin_package(CATKIN_DEPENDS ...)
+##   * uncomment the add_*_files sections below as needed
+##     and list every .msg/.srv/.action file to be processed
+##   * uncomment the generate_messages entry below
+##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
+
+## Generate messages in the 'msg' folder
+# add_message_files(
+#   FILES
+#   Message1.msg
+#   Message2.msg
+# )
+
+## Generate services in the 'srv' folder
+# add_service_files(
+#   FILES
+#   Service1.srv
+#   Service2.srv
+# )
+
+## Generate actions in the 'action' folder
+# add_action_files(
+#   FILES
+#   Action1.action
+#   Action2.action
+# )
+
+## Generate added messages and services with any dependencies listed here
+# generate_messages(
+#   DEPENDENCIES
+#   std_msgs  # Or other packages containing msgs
+# )
+
+################################################
+## Declare ROS dynamic reconfigure parameters ##
+################################################
+
+## To declare and build dynamic reconfigure parameters within this
+## package, follow these steps:
+## * In the file package.xml:
+##   * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
+## * In this file (CMakeLists.txt):
+##   * add "dynamic_reconfigure" to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * uncomment the "generate_dynamic_reconfigure_options" section below
+##     and list every .cfg file to be processed
+
+## Generate dynamic reconfigure parameters in the 'cfg' folder
+# generate_dynamic_reconfigure_options(
+#   cfg/DynReconf1.cfg
+#   cfg/DynReconf2.cfg
+# )
+
+###################################
+## catkin specific configuration ##
+###################################
+## The catkin_package macro generates cmake config files for your package
+## Declare things to be passed to dependent projects
+## INCLUDE_DIRS: uncomment this if your package contains header files
+## LIBRARIES: libraries you create in this project that dependent projects also need
+## CATKIN_DEPENDS: catkin_packages dependent projects also need
+## DEPENDS: system dependencies of this project that dependent projects also need
+catkin_package(
+#  INCLUDE_DIRS include
+#  LIBRARIES ov_repub
+#  CATKIN_DEPENDS roscpp rospy
+#  DEPENDS system_lib
+)
+
+###########
+## Build ##
+###########
+
+## Specify additional locations of header files
+## Your package locations should be listed before other locations
+include_directories(
+# include
+  ${catkin_INCLUDE_DIRS}
+)
+
+## Declare a C++ library
+# add_library(${PROJECT_NAME}
+#   src/${PROJECT_NAME}/ov_repub.cpp
+# )
+
+## Add cmake target dependencies of the library
+## as an example, code may need to be generated before libraries
+## either from message generation or dynamic reconfigure
+# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Declare a C++ executable
+## With catkin_make all packages are built within a single CMake context
+## The recommended prefix ensures that target names across packages don't collide
+# add_executable(${PROJECT_NAME}_node src/ov_repub_node.cpp)
+
+## Rename C++ executable without prefix
+## The above recommended prefix causes long target names, the following renames the
+## target back to the shorter version for ease of user use
+## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
+# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
+
+## Add cmake target dependencies of the executable
+## same as for the library above
+# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Specify libraries to link a library or executable target against
+# target_link_libraries(${PROJECT_NAME}_node
+#   ${catkin_LIBRARIES}
+# )
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executable scripts (Python etc.) for installation
+## in contrast to setup.py, you can choose the destination
+# catkin_install_python(PROGRAMS
+#   scripts/my_python_script
+#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark executables for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
+# install(TARGETS ${PROJECT_NAME}_node
+#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark libraries for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
+# install(TARGETS ${PROJECT_NAME}
+#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+# )
+
+## Mark cpp header files for installation
+# install(DIRECTORY include/${PROJECT_NAME}/
+#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+#   FILES_MATCHING PATTERN "*.h"
+#   PATTERN ".svn" EXCLUDE
+# )
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+# install(FILES
+#   # myfile1
+#   # myfile2
+#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# )
+
+#############
+## Testing ##
+#############
+
+## Add gtest based cpp test target and link libraries
+# catkin_add_gtest(${PROJECT_NAME}-test test/test_ov_repub.cpp)
+# if(TARGET ${PROJECT_NAME}-test)
+#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
+# endif()
+
+## Add folders to be run by python nosetests
+# catkin_add_nosetests(test)
diff --git a/ov_repub/launch/bluefox.launch b/ov_repub/launch/bluefox.launch
new file mode 100644
index 000000000..82c5ac3b8
--- /dev/null
+++ b/ov_repub/launch/bluefox.launch
@@ -0,0 +1,22 @@
diff --git a/ov_repub/launch/simulation_bluefox.launch b/ov_repub/launch/simulation_bluefox.launch
new file mode 100644
index 000000000..9163ed284
--- /dev/null
+++ b/ov_repub/launch/simulation_bluefox.launch
@@ -0,0 +1,21 @@
diff --git a/ov_repub/launch/t265.launch b/ov_repub/launch/t265.launch
new file mode 100644
index 000000000..2d366d399
--- /dev/null
+++ b/ov_repub/launch/t265.launch
@@ -0,0 +1,22 @@
diff --git a/ov_repub/package.xml b/ov_repub/package.xml
new file mode 100644
index 000000000..af1e51ea4
--- /dev/null
+++ b/ov_repub/package.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>ov_repub</name>
+  <version>0.0.0</version>
+  <description>The ov_repub package</description>
+
+  <maintainer email="rakshith@todo.todo">rakshith</maintainer>
+
+  <license>TODO</license>
+
+  <buildtool_depend>catkin</buildtool_depend>
+  <build_depend>roscpp</build_depend>
+  <build_depend>rospy</build_depend>
+  <build_export_depend>roscpp</build_export_depend>
+  <build_export_depend>rospy</build_export_depend>
+  <exec_depend>roscpp</exec_depend>
+  <exec_depend>rospy</exec_depend>
+
+  <export>
+  </export>
+</package>