From 3a62f35e22340e591ee35f60b23b277f03036892 Mon Sep 17 00:00:00 2001
From: raphaelchang <raphaelchang1227@gmail.com>
Date: Wed, 18 Sep 2019 20:44:11 -0400
Subject: [PATCH] Add working stereo matching support
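
Adds a working stereo pipeline: stereo image and stereo-pose support in
data::Frame, stereo observations in data::Landmark, a StereoModule driven by
the new stereo::StereoMatcher base class and LK-based matcher, a templated
EvalBase<Stereo> with a four-message synchronizer, and a stereo evaluation
node plus launch file. Example invocation (assuming the package has been
built and sourced; the bag path is a placeholder):

    roslaunch omni_slam_eval stereo_eval.launch bag_file:=/path/to/recording.bag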

---
 CMakeLists.txt                            |  28 ++++-
 launch/stereo_eval.launch                 |  33 ++++++
 src/data/feature.h                        |   5 +-
 src/data/frame.cc                         | 115 ++++++++++++++++++++-
 src/data/frame.h                          |  15 ++-
 src/data/landmark.cc                      |  26 ++++-
 src/data/landmark.h                       |   5 +
 src/feature/tracker.cc                    |  42 ++++----
 src/module/odometry_module.cc             |   8 ++
 src/module/reconstruction_module.cc       |   5 +
 src/module/stereo_module.cc               |  80 +++++++++++++++
 src/module/stereo_module.h                |  61 +++++++++++
 src/module/tracking_module.cc             |  28 ++---
 src/odometry/five_point.cc                |   4 +
 src/odometry/pnp.cc                       |   4 +
 src/omni_slam_odometry_eval_node.cc       |   2 +-
 src/omni_slam_reconstruction_eval_node.cc |   2 +-
 src/omni_slam_stereo_eval_node.cc         |  12 +++
 src/omni_slam_tracking_eval_node.cc       |   2 +-
 src/ros/eval_base.cc                      | 118 +++++++++++++++++++---
 src/ros/eval_base.h                       |   9 +-
 src/ros/matching_eval.cc                  |   4 +-
 src/ros/matching_eval.h                   |   2 +-
 src/ros/odometry_eval.cc                  |  78 +++++++-------
 src/ros/odometry_eval.h                   |  19 ++--
 src/ros/reconstruction_eval.cc            |  51 ++++++----
 src/ros/reconstruction_eval.h             |  20 ++--
 src/ros/stereo_eval.cc                    |  60 +++++++++++
 src/ros/stereo_eval.h                     |  38 +++++++
 src/ros/tracking_eval.cc                  |  44 ++++----
 src/ros/tracking_eval.h                   |   3 +-
 src/stereo/lk_stereo_matcher.cc           |  45 +++++++++
 src/stereo/lk_stereo_matcher.h            |  28 +++++
 src/stereo/stereo_matcher.cc              |  91 +++++++++++++++++
 src/stereo/stereo_matcher.h               |  31 ++++++
 src/util/tf_util.h                        |  21 ++++
 36 files changed, 982 insertions(+), 157 deletions(-)
 create mode 100644 launch/stereo_eval.launch
 create mode 100644 src/module/stereo_module.cc
 create mode 100644 src/module/stereo_module.h
 create mode 100644 src/omni_slam_stereo_eval_node.cc
 create mode 100644 src/ros/stereo_eval.cc
 create mode 100644 src/ros/stereo_eval.h
 create mode 100644 src/stereo/lk_stereo_matcher.cc
 create mode 100644 src/stereo/lk_stereo_matcher.h
 create mode 100644 src/stereo/stereo_matcher.cc
 create mode 100644 src/stereo/stereo_matcher.h

diff --git a/CMakeLists.txt b/CMakeLists.txt
index d972d03..d02ace0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -58,6 +58,7 @@ include_directories(
 add_library(omni_slam_eval_lib
   src/module/tracking_module.cc
   src/module/matching_module.cc
+  src/module/stereo_module.cc
   src/module/reconstruction_module.cc
   src/module/odometry_module.cc
   src/data/frame.cc
@@ -70,6 +71,8 @@ add_library(omni_slam_eval_lib
   src/odometry/pnp.cc
   src/odometry/five_point.cc
   src/optimization/bundle_adjuster.cc
+  src/stereo/stereo_matcher.cc
+  src/stereo/lk_stereo_matcher.cc
   src/util/hdf_file.cc
 )
 
@@ -99,6 +102,13 @@ add_executable(omni_slam_odometry_eval_node
     src/omni_slam_odometry_eval_node.cc
 )
 
+add_executable(omni_slam_stereo_eval_node
+    src/ros/stereo_eval.cc
+    src/ros/tracking_eval.cc
+    src/ros/eval_base.cc
+    src/omni_slam_stereo_eval_node.cc
+)
+
 target_link_libraries(omni_slam_tracking_eval_node PUBLIC omni_slam_eval_lib
   ${catkin_LIBRARIES}
   ${OpenCV_LIBS}
@@ -135,11 +145,21 @@ target_link_libraries(omni_slam_odometry_eval_node PUBLIC omni_slam_eval_lib
   ${CERES_LIBRARIES}
 )
 
+target_link_libraries(omni_slam_stereo_eval_node PUBLIC omni_slam_eval_lib
+  ${catkin_LIBRARIES}
+  ${OpenCV_LIBS}
+  ${HDF5_CXX_LIBRARIES}
+  ${OpenMP_CXX_FLAGS}
+  ${PCL_LIBRARIES}
+  ${CERES_LIBRARIES}
+)
+
 target_compile_options(omni_slam_tracking_eval_node PUBLIC ${OpenMP_CXX_FLAGS})
 target_compile_options(omni_slam_matching_eval_node PUBLIC ${OpenMP_CXX_FLAGS})
 target_compile_options(omni_slam_reconstruction_eval_node PUBLIC ${OpenMP_CXX_FLAGS})
 target_compile_options(omni_slam_odometry_eval_node PUBLIC ${OpenMP_CXX_FLAGS})
-set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS})
+target_compile_options(omni_slam_stereo_eval_node PUBLIC ${OpenMP_CXX_FLAGS})
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS} -ggdb")
 
 install(TARGETS omni_slam_tracking_eval_node
   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
@@ -164,3 +184,9 @@ install(TARGETS omni_slam_odometry_eval_node
   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
 )
+
+install(TARGETS omni_slam_stereo_eval_node
+  ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+  LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+  RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+)
diff --git a/launch/stereo_eval.launch b/launch/stereo_eval.launch
new file mode 100644
index 0000000..cb5f968
--- /dev/null
+++ b/launch/stereo_eval.launch
@@ -0,0 +1,33 @@
+<launch>
+    <arg name="bag_file" default="" />
+    <arg name="results_file" default="$(arg bag_file).stereo.hdf5" />
+    <arg name="camera_model" default="double_sphere" />
+    <arg name="camera_params" default="{fx: 295.936, fy: 295.936, cx: 512, cy: 512, chi: 0.3, alpha: 0.6666667}" />
+    <arg name="rate" default="1" />
+    <node pkg="omni_slam_eval" type="omni_slam_stereo_eval_node" name="omni_slam_stereo_eval_node" required="true" output="screen">
+        <param name="bag_file" value="$(arg bag_file)" />
+        <param name="results_file" value="$(arg results_file)" />
+        <param name="image_topic" value="/unity_ros/Sphere/FisheyeCamera/image_raw" />
+        <param name="stereo_image_topic" value="/unity_ros/Sphere/FisheyeCamera2/image_raw" />
+        <param name="depth_image_topic" value="/unity_ros/Sphere/FisheyeDepthCamera/image_raw" />
+        <param name="pose_topic" value="/unity_ros/Sphere/TrueState/pose" />
+        <param name="stereo_matched_topic" value="/omni_slam/stereo_matched" />
+        <param name="rate" value="$(arg rate)" />
+        <rosparam subst_value="true">
+            camera_model: '$(arg camera_model)'
+            camera_parameters: $(arg camera_params)
+            detector_type: 'GFTT'
+            detector_parameters: {maxCorners: 1000, qualityLevel: 0.001, minDistance: 5, blockSize: 5}
+            stereo_matcher_window_size: 256
+            stereo_matcher_num_scales: 5
+            stereo_matcher_error_threshold: 20
+            stereo_matcher_epipolar_threshold: 0.008
+            stereo_tf_t: [2.0, 0.0, 0.0]
+            stereo_tf_r: [0.0, 0.0, 0.0, 1.0]
+        </rosparam>
+    </node>
+</launch>
+
+
+
+
diff --git a/src/data/feature.h b/src/data/feature.h
index 2a921a4..5953d74 100644
--- a/src/data/feature.h
+++ b/src/data/feature.h
@@ -28,14 +28,15 @@ class Feature
     bool HasWorldPoint() const;
     bool HasEstimatedWorldPoint() const;
 
-    bool worldPointCached_{false};
-    bool worldPointEstimateCached_{false};
 private:
     Frame &frame_;
     cv::KeyPoint kpt_;
     cv::Mat descriptor_;
     Vector3d worldPoint_;
     Vector3d worldPointEstimate_;
+
+    bool worldPointCached_{false};
+    bool worldPointEstimateCached_{false};
 };
 
 }
diff --git a/src/data/frame.cc b/src/data/frame.cc
index 663ef4b..fa4eb6f 100644
--- a/src/data/frame.cc
+++ b/src/data/frame.cc
@@ -8,17 +8,33 @@ namespace data
 
 int Frame::lastFrameId_ = 0;
 
-Frame::Frame(cv::Mat &image, cv::Mat &depth_image, Matrix<double, 3, 4>  &pose, double time, camera::CameraModel<> &camera_model)
+Frame::Frame(cv::Mat &image, cv::Mat &stereo_image, cv::Mat &depth_image, Matrix<double, 3, 4>  &pose, Matrix<double, 3, 4> &stereo_pose, double time, camera::CameraModel<> &camera_model)
     : id_(lastFrameId_++),
     image_(image.clone()),
+    stereoImage_(stereo_image.clone()),
     depthImage_(depth_image.clone()),
     pose_(pose),
     invPose_(util::TFUtil::InversePoseMatrix(pose)),
+    stereoPose_(stereo_pose),
     timeSec_(time),
     cameraModel_(camera_model)
 {
     hasPose_ = true;
     hasDepth_ = true;
+    hasStereo_ = true;
+}
+
+Frame::Frame(cv::Mat &image, cv::Mat &stereo_image, Matrix<double, 3, 4> &stereo_pose, double time, camera::CameraModel<> &camera_model)
+    : id_(lastFrameId_++),
+    image_(image.clone()),
+    stereoImage_(stereo_image.clone()),
+    stereoPose_(stereo_pose),
+    timeSec_(time),
+    cameraModel_(camera_model)
+{
+    hasPose_ = false;
+    hasDepth_ = false;
+    hasStereo_ = true;
 }
 
 Frame::Frame(cv::Mat &image, cv::Mat &depth_image, double time, camera::CameraModel<> &camera_model)
@@ -30,6 +46,21 @@ Frame::Frame(cv::Mat &image, cv::Mat &depth_image, double time, camera::CameraMo
 {
     hasPose_ = false;
     hasDepth_ = true;
+    hasStereo_ = false;
+}
+
+Frame::Frame(cv::Mat &image, cv::Mat &stereo_image, cv::Mat &depth_image, Matrix<double, 3, 4> &stereo_pose, double time, camera::CameraModel<> &camera_model)
+    : id_(lastFrameId_++),
+    image_(image.clone()),
+    stereoImage_(stereo_image.clone()),
+    depthImage_(depth_image.clone()),
+    stereoPose_(stereo_pose),
+    timeSec_(time),
+    cameraModel_(camera_model)
+{
+    hasPose_ = false;
+    hasDepth_ = true;
+    hasStereo_ = true;
 }
 
 Frame::Frame(cv::Mat &image, Matrix<double, 3, 4>  &pose, double time, camera::CameraModel<> &camera_model)
@@ -42,6 +73,36 @@ Frame::Frame(cv::Mat &image, Matrix<double, 3, 4>  &pose, double time, camera::C
 {
     hasPose_ = true;
     hasDepth_ = false;
+    hasStereo_ = false;
+}
+
+Frame::Frame(cv::Mat &image, cv::Mat &stereo_image, Matrix<double, 3, 4>  &pose, Matrix<double, 3, 4> &stereo_pose, double time, camera::CameraModel<> &camera_model)
+    : id_(lastFrameId_++),
+    image_(image.clone()),
+    stereoImage_(stereo_image.clone()),
+    pose_(pose),
+    invPose_(util::TFUtil::InversePoseMatrix(pose)),
+    stereoPose_(stereo_pose),
+    timeSec_(time),
+    cameraModel_(camera_model)
+{
+    hasPose_ = true;
+    hasDepth_ = false;
+    hasStereo_ = true;
+}
+
+Frame::Frame(cv::Mat &image, Matrix<double, 3, 4>  &pose, cv::Mat &depth_image, double time, camera::CameraModel<> &camera_model)
+    : id_(lastFrameId_++),
+    image_(image.clone()),
+    depthImage_(depth_image.clone()),
+    pose_(pose),
+    invPose_(util::TFUtil::InversePoseMatrix(pose)),
+    timeSec_(time),
+    cameraModel_(camera_model)
+{
+    hasPose_ = true;
+    hasDepth_ = true;
+    hasStereo_ = false;
 }
 
 Frame::Frame(cv::Mat &image, double time, camera::CameraModel<> &camera_model)
@@ -51,13 +112,16 @@ Frame::Frame(cv::Mat &image, double time, camera::CameraModel<> &camera_model)
 {
     hasPose_ = false;
     hasDepth_ = false;
+    hasStereo_ = false;
 }
 
 Frame::Frame(const Frame &other)
     : image_(other.image_.clone()),
     depthImage_(other.depthImage_.clone()),
+    stereoImage_(other.stereoImage_.clone()),
     imageComp_(other.imageComp_),
     depthImageComp_(other.depthImageComp_),
+    stereoImageComp_(other.stereoImageComp_),
     cameraModel_(other.cameraModel_),
     id_(other.id_),
     pose_(other.pose_),
@@ -65,6 +129,7 @@ Frame::Frame(const Frame &other)
+    stereoPose_(other.stereoPose_),
     timeSec_(other.timeSec_),
     hasPose_(other.hasPose_),
     hasDepth_(other.hasDepth_),
+    hasStereo_(other.hasStereo_),
     isCompressed_(other.isCompressed_)
 {
 }
@@ -97,11 +162,25 @@ const cv::Mat& Frame::GetDepthImage()
     return depthImage_;
 }
 
+const cv::Mat& Frame::GetStereoImage()
+{
+    if (isCompressed_)
+    {
+        DecompressImages();
+    }
+    return stereoImage_;
+}
+
 const int Frame::GetID() const
 {
     return id_;
 }
 
+const Matrix<double, 3, 4>& Frame::GetStereoPose() const
+{
+    return stereoPose_;
+}
+
 const camera::CameraModel<>& Frame::GetCameraModel() const
 {
     return cameraModel_;
@@ -138,6 +217,25 @@ void Frame::SetDepthImage(cv::Mat &depth_image)
     hasDepth_ = true;
 }
 
+void Frame::SetStereoImage(cv::Mat &stereo_image)
+{
+    if (isCompressed_)
+    {
+        std::vector<int> param = {cv::IMWRITE_PNG_COMPRESSION, 5};
+        cv::imencode(".png", stereo_image, stereoImageComp_, param);
+    }
+    else
+    {
+        stereoImage_ = stereo_image.clone();
+    }
+    hasStereo_ = true;
+}
+
+void Frame::SetStereoPose(Matrix<double, 3, 4> &pose)
+{
+    stereoPose_ = pose;
+}
+
 void Frame::SetEstimatedPose(const Matrix<double, 3, 4> &pose, const std::vector<int> &landmark_ids)
 {
     poseEstimate_ = pose;
@@ -178,6 +276,11 @@ bool Frame::HasDepthImage() const
     return hasDepth_;
 }
 
+bool Frame::HasStereoImage() const
+{
+    return hasStereo_;
+}
+
 bool Frame::HasEstimatedPose() const
 {
     return hasPoseEstimate_;
@@ -200,8 +303,13 @@ void Frame::CompressImages()
     {
         cv::imencode(".png", depthImage_, depthImageComp_, param);
     }
+    if (hasStereo_)
+    {
+        cv::imencode(".png", stereoImage_, stereoImageComp_, param);
+    }
     image_.release();
     depthImage_.release();
+    stereoImage_.release();
     isCompressed_ = true;
 }
 
@@ -216,8 +324,13 @@ void Frame::DecompressImages()
     {
         depthImage_ = cv::imdecode(cv::Mat(1, depthImageComp_.size(), CV_8UC1, depthImageComp_.data()), CV_LOAD_IMAGE_UNCHANGED);
     }
+    if (hasStereo_)
+    {
+        stereoImage_ = cv::imdecode(cv::Mat(1, stereoImageComp_.size(), CV_8UC1, stereoImageComp_.data()), CV_LOAD_IMAGE_UNCHANGED);
+    }
     imageComp_.clear();
     depthImageComp_.clear();
+    stereoImageComp_.clear();
     isCompressed_ = false;
 }
 
diff --git a/src/data/frame.h b/src/data/frame.h
index 7ec88eb..cc83be8 100644
--- a/src/data/frame.h
+++ b/src/data/frame.h
@@ -16,9 +16,13 @@ namespace data
 class Frame
 {
 public:
-    Frame(cv::Mat &image, cv::Mat &depth_image, Matrix<double, 3, 4>  &pose, double time, camera::CameraModel<> &camera_model);
+    Frame(cv::Mat &image, cv::Mat &stereo_image, cv::Mat &depth_image, Matrix<double, 3, 4>  &pose, Matrix<double, 3, 4> &stereo_pose, double time, camera::CameraModel<> &camera_model);
+    Frame(cv::Mat &image, cv::Mat &stereo_image, Matrix<double, 3, 4> &stereo_pose, double time, camera::CameraModel<> &camera_model);
     Frame(cv::Mat &image, cv::Mat &depth_image, double time, camera::CameraModel<> &camera_model);
+    Frame(cv::Mat &image, cv::Mat &stereo_image, cv::Mat &depth_image, Matrix<double, 3, 4> &stereo_pose, double time, camera::CameraModel<> &camera_model);
     Frame(cv::Mat &image, Matrix<double, 3, 4>  &pose, double time, camera::CameraModel<> &camera_model);
+    Frame(cv::Mat &image, cv::Mat &stereo_image, Matrix<double, 3, 4>  &pose, Matrix<double, 3, 4> &stereo_pose, double time, camera::CameraModel<> &camera_model);
+    Frame(cv::Mat &image, Matrix<double, 3, 4>  &pose, cv::Mat &depth_image, double time, camera::CameraModel<> &camera_model);
     Frame(cv::Mat &image, double time, camera::CameraModel<> &camera_model);
     Frame(const Frame &other);
 
@@ -26,6 +30,8 @@ class Frame
     const Matrix<double, 3, 4>& GetInversePose() const;
     const cv::Mat& GetImage();
     const cv::Mat& GetDepthImage();
+    const cv::Mat& GetStereoImage();
+    const Matrix<double, 3, 4>& GetStereoPose() const;
     const camera::CameraModel<>& GetCameraModel() const;
     const Matrix<double, 3, 4>& GetEstimatedPose() const;
     const Matrix<double, 3, 4>& GetEstimatedInversePose() const;
@@ -34,11 +40,14 @@ class Frame
 
     bool HasPose() const;
     bool HasDepthImage() const;
+    bool HasStereoImage() const;
     bool HasEstimatedPose() const;
     bool IsEstimatedByLandmark(const int landmark_id) const;
 
     void SetPose(Matrix<double, 3, 4> &pose);
     void SetDepthImage(cv::Mat &depth_image);
+    void SetStereoImage(cv::Mat &stereo_image);
+    void SetStereoPose(Matrix<double, 3, 4> &pose);
     void SetEstimatedPose(const Matrix<double, 3, 4> &pose, const std::vector<int> &landmark_ids);
     void SetEstimatedPose(const Matrix<double, 3, 4> &pose);
     void SetEstimatedInversePose(const Matrix<double, 3, 4> &pose, const std::vector<int> &landmark_ids);
@@ -52,10 +61,13 @@ class Frame
     const int id_;
     std::vector<unsigned char> imageComp_;
     std::vector<unsigned char> depthImageComp_;
+    std::vector<unsigned char> stereoImageComp_;
     cv::Mat image_;
     cv::Mat depthImage_;
+    cv::Mat stereoImage_;
     Matrix<double, 3, 4> pose_;
     Matrix<double, 3, 4> invPose_;
+    Matrix<double, 3, 4> stereoPose_;
     Matrix<double, 3, 4> poseEstimate_;
     Matrix<double, 3, 4> invPoseEstimate_;
     double timeSec_;
@@ -65,6 +77,7 @@ class Frame
 
     bool hasPose_;
     bool hasDepth_;
+    bool hasStereo_;
     bool hasPoseEstimate_{false};
 
     bool isCompressed_{false};
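
A minimal usage sketch of the fully populated overload added above (a sketch
only: cameraModel is assumed to be an existing camera::CameraModel<> instance,
and the images and poses are placeholders):

    cv::Mat img, stereoImg, depthImg;                // main image, stereo image, depth image
    Eigen::Matrix<double, 3, 4> pose, stereoPose;    // camera pose and stereo camera pose
    data::Frame frame(img, stereoImg, depthImg, pose, stereoPose, /*time=*/0.0, cameraModel);
    // frame.HasStereoImage(), frame.HasDepthImage() and frame.HasPose() now all return true
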
diff --git a/src/data/landmark.cc b/src/data/landmark.cc
index a2dbff2..f88b409 100644
--- a/src/data/landmark.cc
+++ b/src/data/landmark.cc
@@ -21,7 +21,7 @@ void Landmark::AddObservation(Feature obs, bool compute_gnd)
             groundTruth_ = obs.GetWorldPoint();
             hasGroundTruth_ = true;
         }
-        if (obs.HasEstimatedWorldPoint())
+        if (!obs.GetFrame().HasStereoImage() && obs.HasEstimatedWorldPoint())
         {
             posEstimate_ = obs.GetEstimatedWorldPoint();
             estFrameIds_.insert(obs.GetFrame().GetID());
@@ -36,6 +36,16 @@ void Landmark::AddObservation(Feature obs, bool compute_gnd)
     obs_.push_back(obs);
 }
 
+void Landmark::AddStereoObservation(Feature obs)
+{
+    if (idToStereoIndex_.find(obs.GetFrame().GetID()) != idToStereoIndex_.end())
+    {
+        return;
+    }
+    idToStereoIndex_[obs.GetFrame().GetID()] = stereoObs_.size();
+    stereoObs_.push_back(obs);
+}
+
 const std::vector<Feature>& Landmark::GetObservations() const
 {
     return obs_;
@@ -46,6 +56,11 @@ std::vector<Feature>& Landmark::GetObservations()
     return obs_;
 }
 
+const std::vector<Feature>& Landmark::GetStereoObservations() const
+{
+    return stereoObs_;
+}
+
 bool Landmark::IsObservedInFrame(const int frame_id) const
 {
     for (Feature f : obs_)
@@ -81,6 +96,15 @@ const Feature* Landmark::GetObservationByFrameID(const int frame_id) const
     return &obs_[idToIndex_.at(frame_id)];
 }
 
+const Feature* Landmark::GetStereoObservationByFrameID(const int frame_id) const
+{
+    if (idToStereoIndex_.find(frame_id) == idToStereoIndex_.end())
+    {
+        return nullptr;
+    }
+    return &stereoObs_[idToStereoIndex_.at(frame_id)];
+}
+
 void Landmark::SetEstimatedPosition(const Vector3d &pos, const std::vector<int> &frame_ids)
 {
     posEstimate_ = pos;
diff --git a/src/data/landmark.h b/src/data/landmark.h
index db5b9e4..c8b9f6d 100644
--- a/src/data/landmark.h
+++ b/src/data/landmark.h
@@ -18,12 +18,15 @@ class Landmark
 public:
     Landmark();
     void AddObservation(Feature obs, bool compute_gnd = true);
+    void AddStereoObservation(Feature obs);
     const std::vector<Feature>& GetObservations() const;
     std::vector<Feature>& GetObservations();
+    const std::vector<Feature>& GetStereoObservations() const;
     bool IsObservedInFrame(const int frame_id) const;
     const int GetFirstFrameID() const;
     const int GetNumObservations() const;
     const Feature* GetObservationByFrameID(const int frame_id) const;
+    const Feature* GetStereoObservationByFrameID(const int frame_id) const;
     void SetEstimatedPosition(const Vector3d &pos, const std::vector<int> &frame_ids);
     void SetEstimatedPosition(const Vector3d &pos);
 
@@ -37,8 +40,10 @@ class Landmark
 private:
     const int id_;
     std::vector<Feature> obs_;
+    std::vector<Feature> stereoObs_;
     std::unordered_set<int> estFrameIds_;
     std::map<int, int> idToIndex_;
+    std::map<int, int> idToStereoIndex_;
     Vector3d groundTruth_;
     Vector3d posEstimate_;
     bool hasGroundTruth_{false};
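
How the new stereo-observation accessors pair up, roughly (hypothetical names:
frame, landmarks, and a keypoint kpt matched in the stereo image):

    data::Feature stereoFeat(frame, kpt);            // feature observed in the stereo image
    landmarks[i].AddStereoObservation(stereoFeat);
    const data::Feature *obs = landmarks[i].GetStereoObservationByFrameID(frame.GetID());
    // obs is nullptr if no stereo observation was recorded for that frame ID
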
diff --git a/src/feature/tracker.cc b/src/feature/tracker.cc
index 454712d..421bdb7 100644
--- a/src/feature/tracker.cc
+++ b/src/feature/tracker.cc
@@ -30,22 +30,22 @@ int Tracker::Track(std::vector<data::Landmark> &landmarks, data::Frame &cur_fram
     }
     bool prevCompressed = prevFrame_->IsCompressed();
     bool curCompressed = cur_frame.IsCompressed();
-    int prev_id = prevFrame_->GetID();
-    std::vector<cv::Point2f> points_to_track;
-    std::vector<cv::KeyPoint> orig_kpt;
-    std::vector<int> orig_inx;
+    int prevId = prevFrame_->GetID();
+    std::vector<cv::Point2f> pointsToTrack;
+    std::vector<cv::KeyPoint> origKpt;
+    std::vector<int> origInx;
     for (int i = 0; i < landmarks.size(); i++)
     {
         data::Landmark &landmark = landmarks[i];
-        const data::Feature *feat = landmark.GetObservationByFrameID(prev_id);
+        const data::Feature *feat = landmark.GetObservationByFrameID(prevId);
         if (feat != nullptr)
         {
-            points_to_track.push_back(feat->GetKeypoint().pt);
-            orig_kpt.push_back(feat->GetKeypoint());
-            orig_inx.push_back(i);
+            pointsToTrack.push_back(feat->GetKeypoint().pt);
+            origKpt.push_back(feat->GetKeypoint());
+            origInx.push_back(i);
         }
     }
-    if (points_to_track.size() == 0)
+    if (pointsToTrack.size() == 0)
     {
         prevFrame_ = &cur_frame;
         return 0;
@@ -53,30 +53,30 @@ int Tracker::Track(std::vector<data::Landmark> &landmarks, data::Frame &cur_fram
     std::vector<cv::Point2f> results;
     std::vector<unsigned char> status;
     std::vector<float> err;
-    cv::calcOpticalFlowPyrLK(prevFrame_->GetImage(), cur_frame.GetImage(), points_to_track, results, status, err, windowSize_, numScales_, termCrit_, 0);
+    cv::calcOpticalFlowPyrLK(prevFrame_->GetImage(), cur_frame.GetImage(), pointsToTrack, results, status, err, windowSize_, numScales_, termCrit_, 0);
     errors.clear();
     int numGood = 0;
     for (int i = 0; i < results.size(); i++)
     {
-        data::Landmark &landmark = landmarks[orig_inx[i]];
+        data::Landmark &landmark = landmarks[origInx[i]];
         if (deltaPixErrThresh_ > 0)
         {
             if (landmark.HasGroundTruth() && cur_frame.HasPose())
             {
-                Vector2d pixel_gnd;
-                Vector2d pixel_gnd_prev;
-                if (!cur_frame.GetCameraModel().ProjectToImage(util::TFUtil::WorldFrameToCameraFrame(util::TFUtil::TransformPoint(cur_frame.GetInversePose(), landmark.GetGroundTruth())), pixel_gnd) || !prevFrame_->GetCameraModel().ProjectToImage(util::TFUtil::WorldFrameToCameraFrame(util::TFUtil::TransformPoint(prevFrame_->GetInversePose(), landmark.GetGroundTruth())), pixel_gnd_prev))
+                Vector2d pixelGnd;
+                Vector2d pixelGndPrev;
+                if (!cur_frame.GetCameraModel().ProjectToImage(util::TFUtil::WorldFrameToCameraFrame(util::TFUtil::TransformPoint(cur_frame.GetInversePose(), landmark.GetGroundTruth())), pixelGnd) || !prevFrame_->GetCameraModel().ProjectToImage(util::TFUtil::WorldFrameToCameraFrame(util::TFUtil::TransformPoint(prevFrame_->GetInversePose(), landmark.GetGroundTruth())), pixelGndPrev))
                 {
                     continue;
                 }
                 else
                 {
-                    Vector2d pixel_cur;
-                    pixel_cur << results[i].x, results[i].y;
-                    Vector2d pixel_prev;
-                    pixel_prev << points_to_track[i].x, points_to_track[i].y;
-                    double cur_error = (pixel_cur - pixel_gnd).norm();
-                    double prev_error = (pixel_prev - pixel_gnd_prev).norm();
+                    Vector2d pixelCur;
+                    pixelCur << results[i].x, results[i].y;
+                    Vector2d pixelPrev;
+                    pixelPrev << pointsToTrack[i].x, pointsToTrack[i].y;
+                    double cur_error = (pixelCur - pixelGnd).norm();
+                    double prev_error = (pixelPrev - pixelGndPrev).norm();
                     if (cur_error - prev_error > deltaPixErrThresh_)
                     {
                         continue;
@@ -86,7 +86,7 @@ int Tracker::Track(std::vector<data::Landmark> &landmarks, data::Frame &cur_fram
         }
         if (status[i] == 1 && err[i] <= errThresh_)
         {
-            cv::KeyPoint kpt(results[i], orig_kpt[i].size);
+            cv::KeyPoint kpt(results[i], origKpt[i].size);
             data::Feature feat(cur_frame, kpt);
             landmark.AddObservation(feat);
             errors.push_back(err[i]);
diff --git a/src/module/odometry_module.cc b/src/module/odometry_module.cc
index 2176feb..94a1520 100644
--- a/src/module/odometry_module.cc
+++ b/src/module/odometry_module.cc
@@ -20,6 +20,14 @@ OdometryModule::OdometryModule(std::unique_ptr<odometry::PNP> &&pnp, std::unique
 
 void OdometryModule::Update(std::vector<data::Landmark> &landmarks, data::Frame &frame)
 {
+    if (landmarks.size() == 0)
+    {
+        if (frame.HasPose())
+        {
+            frame.SetEstimatedPose(frame.GetPose());
+        }
+        return;
+    }
     pnp_->Compute(landmarks, frame);
 }
 
diff --git a/src/module/reconstruction_module.cc b/src/module/reconstruction_module.cc
index dd09bc6..79a4794 100644
--- a/src/module/reconstruction_module.cc
+++ b/src/module/reconstruction_module.cc
@@ -20,6 +20,11 @@ ReconstructionModule::ReconstructionModule(std::unique_ptr<reconstruction::Trian
 
 void ReconstructionModule::Update(std::vector<data::Landmark> &landmarks)
 {
+    if (landmarks.size() == 0)
+    {
+        return;
+    }
+
     triangulator_->Triangulate(landmarks);
 
     visualization_.Reserve(landmarks.size());
diff --git a/src/module/stereo_module.cc b/src/module/stereo_module.cc
new file mode 100644
index 0000000..aa92baa
--- /dev/null
+++ b/src/module/stereo_module.cc
@@ -0,0 +1,80 @@
+#include "stereo_module.h"
+
+using namespace std;
+
+namespace omni_slam
+{
+namespace module
+{
+
+StereoModule::StereoModule(std::unique_ptr<stereo::StereoMatcher> &stereo)
+    : stereo_(std::move(stereo))
+{
+}
+
+StereoModule::StereoModule(std::unique_ptr<stereo::StereoMatcher> &&stereo)
+    : StereoModule(stereo)
+{
+}
+
+void StereoModule::Update(data::Frame &frame, std::vector<data::Landmark> &landmarks)
+{
+    stereo_->Match(frame, landmarks);
+
+    if (frameNum_ == 0)
+    {
+        visualization_.Init(frame.GetImage().size());
+    }
+    for (data::Landmark &landmark : landmarks)
+    {
+        const data::Feature *feat1 = landmark.GetObservationByFrameID(frame.GetID());
+        const data::Feature *feat2 = landmark.GetStereoObservationByFrameID(frame.GetID());
+        if (feat1 != nullptr && feat2 != nullptr)
+        {
+            Matrix<double, 3, 4> framePose = frame.HasEstimatedPose() ? frame.GetEstimatedPose() : frame.GetPose();
+            double depth = (landmark.GetEstimatedPosition() - framePose.block<3, 1>(0, 3)).norm();
+            visualization_.AddMatch(feat1->GetKeypoint().pt, feat2->GetKeypoint().pt, depth);
+        }
+    }
+    frameNum_++;
+}
+
+StereoModule::Stats& StereoModule::GetStats()
+{
+    return stats_;
+}
+
+void StereoModule::Visualize(cv::Mat &base_img, const cv::Mat &base_stereo_img)
+{
+    visualization_.Draw(base_img, base_stereo_img);
+}
+
+void StereoModule::Visualization::Init(cv::Size img_size)
+{
+    curMask_ = cv::Mat::zeros(cv::Size(img_size.width * 2, img_size.height), CV_8UC3);
+    curDepth_ = cv::Mat::zeros(img_size, CV_8UC3);
+}
+
+void StereoModule::Visualization::AddMatch(cv::Point2f pt1, cv::Point2f pt2, double depth)
+{
+    cv::circle(curMask_, pt1, 3, cv::Scalar(255, 0, 0), -1);
+    cv::circle(curMask_, pt2 + cv::Point2f(curMask_.cols / 2., 0), 3, cv::Scalar(255, 0, 0), -1);
+    cv::line(curMask_, pt1, pt2 + cv::Point2f(curMask_.cols / 2., 0), cv::Scalar(255, 0, 0), 1);
+    cv::circle(curDepth_, pt1, 2, cv::Scalar(depth / maxDepth_ * 255, depth / maxDepth_ * 255, depth / maxDepth_ * 255));
+}
+
+void StereoModule::Visualization::Draw(cv::Mat &img, const cv::Mat &stereo_img)
+{
+    cv::hconcat(img, stereo_img, img);
+    if (img.channels() == 1)
+    {
+        cv::cvtColor(img, img, CV_GRAY2BGR);
+    }
+    curMask_.copyTo(img, curMask_);
+    curMask_ = cv::Mat::zeros(img.size(), CV_8UC3);
+    cv::hconcat(img, curDepth_, img);
+    curDepth_ = cv::Mat::zeros(stereo_img.size(), CV_8UC3);
+}
+
+}
+}
diff --git a/src/module/stereo_module.h b/src/module/stereo_module.h
new file mode 100644
index 0000000..f3c6f19
--- /dev/null
+++ b/src/module/stereo_module.h
@@ -0,0 +1,61 @@
+#ifndef _STEREO_MODULE_H_
+#define _STEREO_MODULE_H_
+
+#include <vector>
+#include <set>
+#include <memory>
+
+#include "data/landmark.h"
+#include "stereo/stereo_matcher.h"
+
+#include <pcl/common/projection_matrix.h>
+#include <pcl/point_types.h>
+
+using namespace Eigen;
+
+namespace omni_slam
+{
+namespace module
+{
+
+class StereoModule
+{
+public:
+    struct Stats
+    {
+    };
+
+    StereoModule(std::unique_ptr<stereo::StereoMatcher> &stereo);
+    StereoModule(std::unique_ptr<stereo::StereoMatcher> &&stereo);
+
+    void Update(data::Frame &frame, std::vector<data::Landmark> &landmarks);
+
+    Stats& GetStats();
+    void Visualize(cv::Mat &base_img, const cv::Mat &base_stereo_img);
+
+private:
+    class Visualization
+    {
+    public:
+        void Init(cv::Size img_size);
+        void AddMatch(cv::Point2f pt1, cv::Point2f pt2, double depth);
+        void Draw(cv::Mat &img, const cv::Mat &stereo_img);
+
+    private:
+        cv::Mat curMask_;
+        cv::Mat curDepth_;
+        const double maxDepth_{50.};
+    };
+
+    std::shared_ptr<stereo::StereoMatcher> stereo_;
+
+    int frameNum_{0};
+
+    Stats stats_;
+    Visualization visualization_;
+};
+
+}
+}
+
+#endif /* _STEREO_MODULE_H_ */
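
A usage sketch for the new module (assuming matcher is an already-constructed
std::unique_ptr<stereo::StereoMatcher>, e.g. the new LK-based matcher, and that
frame and landmarks come from the tracking module):

    module::StereoModule stereoModule(std::move(matcher));
    stereoModule.Update(frame, landmarks);                // records stereo observations on the landmarks
    cv::Mat vis = frame.GetImage().clone();
    stereoModule.Visualize(vis, frame.GetStereoImage());  // side-by-side matches plus depth image
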
diff --git a/src/module/tracking_module.cc b/src/module/tracking_module.cc
index d7ae4cc..01e1fc3 100644
--- a/src/module/tracking_module.cc
+++ b/src/module/tracking_module.cc
@@ -31,14 +31,6 @@ void TrackingModule::Update(std::unique_ptr<data::Frame> &frame)
     int imsize = max(frames_.back()->GetImage().rows, frames_.back()->GetImage().cols);
     if (frameNum_ == 0)
     {
-        #pragma omp parallel for collapse(2)
-        for (int i = 0; i < rs_.size() - 1; i++)
-        {
-            for (int j = 0; j < ts_.size() - 1; j++)
-            {
-                detector_->DetectInRadialRegion(*frames_.back(), landmarks_, rs_[i] * imsize, rs_[i+1] * imsize, ts_[j], ts_[j+1]);
-            }
-        }
         tracker_->Init(*frames_.back());
 
         visualization_.Init(frames_.back()->GetImage().size(), landmarks_.size());
@@ -48,22 +40,24 @@ void TrackingModule::Update(std::unique_ptr<data::Frame> &frame)
     }
 
     vector<double> trackErrors;
-    std::vector<data::Landmark> landmarksTemp(landmarks_);
-    tracker_->Track(landmarksTemp, *frames_.back(), trackErrors);
-
     if (fivePointChecker_)
     {
-        Matrix3d E;
-        std::vector<int> inlierIndices;
-        fivePointChecker_->Compute(landmarksTemp, **next(frames_.rbegin()), *frames_.back(), E, inlierIndices);
-        for (int inx : inlierIndices)
+        std::vector<data::Landmark> landmarksTemp(landmarks_);
+        int tracks = tracker_->Track(landmarksTemp, *frames_.back(), trackErrors);
+        if (tracks > 0)
         {
-            landmarks_[inx].AddObservation(*landmarksTemp[inx].GetObservationByFrameID(frames_.back()->GetID()));
+            Matrix3d E;
+            std::vector<int> inlierIndices;
+            fivePointChecker_->Compute(landmarksTemp, **next(frames_.rbegin()), *frames_.back(), E, inlierIndices);
+            for (int inx : inlierIndices)
+            {
+                landmarks_[inx].AddObservation(*landmarksTemp[inx].GetObservationByFrameID(frames_.back()->GetID()));
+            }
         }
     }
     else
     {
-        landmarks_ = std::move(landmarksTemp);
+        tracker_->Track(landmarks_, *frames_.back(), trackErrors);
     }
 
     int i = 0;
diff --git a/src/odometry/five_point.cc b/src/odometry/five_point.cc
index 2a2f21a..e682939 100644
--- a/src/odometry/five_point.cc
+++ b/src/odometry/five_point.cc
@@ -33,6 +33,10 @@ int FivePoint::Compute(const std::vector<data::Landmark> &landmarks, const data:
         }
         i++;
     }
+    if (x1.size() < 5)
+    {
+        return 0;
+    }
     int inliers = RANSAC(x1, x2, E);
     std::vector<int> indices = GetInlierIndices(x1, x2, E);
     inlier_indices.clear();
diff --git a/src/odometry/pnp.cc b/src/odometry/pnp.cc
index 9c93aa3..2fa2eab 100644
--- a/src/odometry/pnp.cc
+++ b/src/odometry/pnp.cc
@@ -59,6 +59,10 @@ int PNP::Compute(const std::vector<data::Landmark> &landmarks, data::Frame &fram
         }
         i++;
     }
+    if (xs.size() < 4)
+    {
+        return 0;
+    }
     Matrix<double, 3, 4> pose;
     int inliers = RANSAC(xs, ys, yns, frame.GetCameraModel(), pose);
     std::vector<int> indices = GetInlierIndices(xs, yns, pose, frame.GetCameraModel());
diff --git a/src/omni_slam_odometry_eval_node.cc b/src/omni_slam_odometry_eval_node.cc
index 5f3c95f..2e3042f 100644
--- a/src/omni_slam_odometry_eval_node.cc
+++ b/src/omni_slam_odometry_eval_node.cc
@@ -4,7 +4,7 @@ int main(int argc, char *argv[])
 {
     ros::init(argc, argv, "omni_slam_odometry_eval_node");
 
-    omni_slam::ros::OdometryEval odometry_eval;
+    omni_slam::ros::OdometryEval<> odometry_eval;
     odometry_eval.Run();
 
     return 0;
diff --git a/src/omni_slam_reconstruction_eval_node.cc b/src/omni_slam_reconstruction_eval_node.cc
index 9b24837..c16076a 100644
--- a/src/omni_slam_reconstruction_eval_node.cc
+++ b/src/omni_slam_reconstruction_eval_node.cc
@@ -4,7 +4,7 @@ int main(int argc, char *argv[])
 {
     ros::init(argc, argv, "omni_slam_reconstruction_eval_node");
 
-    omni_slam::ros::ReconstructionEval reconstruction_eval;
+    omni_slam::ros::ReconstructionEval<> reconstruction_eval;
     reconstruction_eval.Run();
 
     return 0;
diff --git a/src/omni_slam_stereo_eval_node.cc b/src/omni_slam_stereo_eval_node.cc
new file mode 100644
index 0000000..38aa4df
--- /dev/null
+++ b/src/omni_slam_stereo_eval_node.cc
@@ -0,0 +1,12 @@
+#include "ros/stereo_eval.h"
+
+int main(int argc, char *argv[])
+{
+    ros::init(argc, argv, "omni_slam_stereo_eval_node");
+
+    omni_slam::ros::StereoEval stereo_eval;
+    stereo_eval.Run();
+
+    return 0;
+}
+
diff --git a/src/omni_slam_tracking_eval_node.cc b/src/omni_slam_tracking_eval_node.cc
index 3d1b141..83b7601 100644
--- a/src/omni_slam_tracking_eval_node.cc
+++ b/src/omni_slam_tracking_eval_node.cc
@@ -4,7 +4,7 @@ int main(int argc, char *argv[])
 {
     ros::init(argc, argv, "omni_slam_tracking_eval_node");
 
-    omni_slam::ros::TrackingEval tracking_eval;
+    omni_slam::ros::TrackingEval<> tracking_eval;
     tracking_eval.Run();
 
     return 0;
diff --git a/src/ros/eval_base.cc b/src/ros/eval_base.cc
index 5b3e67c..ec8820f 100644
--- a/src/ros/eval_base.cc
+++ b/src/ros/eval_base.cc
@@ -15,7 +15,8 @@ namespace omni_slam
 namespace ros
 {
 
-EvalBase::EvalBase(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private)
+template <bool Stereo>
+EvalBase<Stereo>::EvalBase(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private)
     : nh_(nh), nhp_(nh_private), imageTransport_(nh)
 {
     std::string cameraModel;
@@ -24,6 +25,19 @@ EvalBase::EvalBase(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_priv
     nhp_.param("image_topic", imageTopic_, std::string("/camera/image_raw"));
     nhp_.param("depth_image_topic", depthImageTopic_, std::string("/depth_camera/image_raw"));
     nhp_.param("pose_topic", poseTopic_, std::string("/pose"));
+    if (Stereo)
+    {
+        nhp_.param("stereo_image_topic", stereoImageTopic_, std::string("/camera2/image_raw"));
+        std::vector<double> stereoT;
+        stereoT.reserve(3);
+        std::vector<double> stereoR;
+        stereoR.reserve(4);
+        nhp_.getParam("stereo_tf_t", stereoT);
+        nhp_.getParam("stereo_tf_r", stereoR);
+        Quaterniond q(stereoR[3], stereoR[0], stereoR[1], stereoR[2]);
+        Vector3d t(stereoT[0], stereoT[1], stereoT[2]);
+        stereoPose_ = util::TFUtil::QuaternionTranslationToPoseMatrix(q, t);
+    }
 
     if (cameraModel == "double_sphere")
     {
@@ -39,20 +53,34 @@ EvalBase::EvalBase(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_priv
     }
 }
 
-void EvalBase::InitSubscribers()
+template <>
+void EvalBase<false>::InitSubscribers()
+{
+    imageSubscriber_.subscribe(imageTransport_, imageTopic_, 3);
+    depthImageSubscriber_.subscribe(imageTransport_, depthImageTopic_, 3);
+    poseSubscriber_.subscribe(nh_, poseTopic_, 10);
+    sync_.reset(new message_filters::Synchronizer<MessageFilter>(MessageFilter(5), imageSubscriber_, depthImageSubscriber_, poseSubscriber_));
+    sync_->registerCallback(boost::bind(&EvalBase<false>::FrameCallback, this, _1, _2, _3));
+}
+
+template <>
+void EvalBase<true>::InitSubscribers()
 {
     imageSubscriber_.subscribe(imageTransport_, imageTopic_, 3);
+    stereoImageSubscriber_.subscribe(imageTransport_, stereoImageTopic_, 3);
     depthImageSubscriber_.subscribe(imageTransport_, depthImageTopic_, 3);
     poseSubscriber_.subscribe(nh_, poseTopic_, 10);
-    sync_.reset(new message_filters::Synchronizer<message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, geometry_msgs::PoseStamped>>(message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, geometry_msgs::PoseStamped>(5), imageSubscriber_, depthImageSubscriber_, poseSubscriber_));
-    sync_->registerCallback(boost::bind(&EvalBase::FrameCallback, this, _1, _2, _3));
+    sync_.reset(new message_filters::Synchronizer<MessageFilter>(MessageFilter(5), imageSubscriber_, stereoImageSubscriber_, depthImageSubscriber_, poseSubscriber_));
+    sync_->registerCallback(boost::bind(&EvalBase<true>::FrameCallback, this, _1, _2, _3, _4));
 }
 
-void EvalBase::InitPublishers()
+template <bool Stereo>
+void EvalBase<Stereo>::InitPublishers()
 {
 }
 
-void EvalBase::FrameCallback(const sensor_msgs::ImageConstPtr &image, const sensor_msgs::ImageConstPtr &depth_image, const geometry_msgs::PoseStamped::ConstPtr &pose)
+template <bool Stereo>
+void EvalBase<Stereo>::FrameCallback(const sensor_msgs::ImageConstPtr &image, const sensor_msgs::ImageConstPtr &depth_image, const geometry_msgs::PoseStamped::ConstPtr &pose)
 {
     cv_bridge::CvImagePtr cvImage;
     cv_bridge::CvImagePtr cvDepthImage;
@@ -75,30 +103,74 @@ void EvalBase::FrameCallback(const sensor_msgs::ImageConstPtr &image, const sens
     cv::Mat depthFloatImg;
     cvDepthImage->image.convertTo(depthFloatImg, CV_64FC1, 500. / 65535);
 
-    ProcessFrame(std::unique_ptr<data::Frame>(new data::Frame(monoImg, depthFloatImg, posemat, pose->header.stamp.toSec(), *cameraModel_)));
+    ProcessFrame(std::unique_ptr<data::Frame>(new data::Frame(monoImg, posemat, depthFloatImg, pose->header.stamp.toSec(), *cameraModel_)));
 
     Visualize(cvImage);
 }
 
-void EvalBase::Finish()
+template <bool Stereo>
+void EvalBase<Stereo>::FrameCallback(const sensor_msgs::ImageConstPtr &image, const sensor_msgs::ImageConstPtr &stereo_image, const sensor_msgs::ImageConstPtr &depth_image, const geometry_msgs::PoseStamped::ConstPtr &pose)
+{
+    cv_bridge::CvImagePtr cvImage;
+    cv_bridge::CvImagePtr cvStereoImage;
+    cv_bridge::CvImagePtr cvDepthImage;
+    try
+    {
+        cvImage = cv_bridge::toCvCopy(image, sensor_msgs::image_encodings::BGR8);
+        cvStereoImage = cv_bridge::toCvCopy(stereo_image, sensor_msgs::image_encodings::BGR8);
+        cvDepthImage = cv_bridge::toCvCopy(depth_image, sensor_msgs::image_encodings::TYPE_16UC1);
+    }
+    catch (cv_bridge::Exception &e)
+    {
+        ROS_ERROR("cv_bridge exception: %s", e.what());
+        return;
+    }
+
+    Quaterniond q(pose->pose.orientation.w, pose->pose.orientation.x, pose->pose.orientation.y, pose->pose.orientation.z);
+    Vector3d t(pose->pose.position.x, pose->pose.position.y, pose->pose.position.z);
+    Matrix<double, 3, 4> posemat = util::TFUtil::QuaternionTranslationToPoseMatrix(q, t);
+    cv::Mat monoImg;
+    cv::cvtColor(cvImage->image, monoImg, CV_BGR2GRAY);
+    cv::Mat monoImg2;
+    cv::cvtColor(cvStereoImage->image, monoImg2, CV_BGR2GRAY);
+    cv::Mat depthFloatImg;
+    cvDepthImage->image.convertTo(depthFloatImg, CV_64FC1, 500. / 65535);
+
+    ProcessFrame(std::unique_ptr<data::Frame>(new data::Frame(monoImg, monoImg2, depthFloatImg, posemat, stereoPose_, pose->header.stamp.toSec(), *cameraModel_)));
+
+    Visualize(cvImage, cvStereoImage);
+}
+
+template <bool Stereo>
+void EvalBase<Stereo>::Finish()
 {
 }
 
-bool EvalBase::GetAttributes(std::map<std::string, std::string> &attributes)
+template <bool Stereo>
+bool EvalBase<Stereo>::GetAttributes(std::map<std::string, std::string> &attributes)
 {
     return false;
 }
 
-bool EvalBase::GetAttributes(std::map<std::string, double> &attributes)
+template <bool Stereo>
+bool EvalBase<Stereo>::GetAttributes(std::map<std::string, double> &attributes)
 {
     return false;
 }
 
-void EvalBase::Visualize(cv_bridge::CvImagePtr &base_img)
+template <bool Stereo>
+void EvalBase<Stereo>::Visualize(cv_bridge::CvImagePtr &base_img)
+{
+}
+
+template <bool Stereo>
+void EvalBase<Stereo>::Visualize(cv_bridge::CvImagePtr &base_img, cv_bridge::CvImagePtr &base_stereo_img)
 {
+    Visualize(base_img);
 }
 
-void EvalBase::Run()
+template <bool Stereo>
+void EvalBase<Stereo>::Run()
 {
     std::string bagFile;
     std::string resultsFile;
@@ -128,6 +200,7 @@ void EvalBase::Run()
         rosbag::Bag bag;
         bag.open(bagFile);
         sensor_msgs::ImageConstPtr imageMsg = nullptr;
+        sensor_msgs::ImageConstPtr stereoMsg = nullptr;
         sensor_msgs::ImageConstPtr depthMsg = nullptr;
         geometry_msgs::PoseStamped::ConstPtr poseMsg = nullptr;
         int runNext = 0;
@@ -145,20 +218,32 @@ void EvalBase::Run()
                 depthMsg = m.instantiate<sensor_msgs::Image>();
                 runNext++;
             }
+            else if (Stereo && m.getTopic() == stereoImageTopic_)
+            {
+                stereoMsg = m.instantiate<sensor_msgs::Image>();
+                runNext++;
+            }
             else if (m.getTopic() == imageTopic_)
             {
                 imageMsg = m.instantiate<sensor_msgs::Image>();
                 runNext++;
             }
-            else if (runNext >= 2 && m.getTopic() == poseTopic_)
+            else if (runNext >= (Stereo ? 3 : 2) && m.getTopic() == poseTopic_)
             {
                 poseMsg = m.instantiate<geometry_msgs::PoseStamped>();
                 runNext = 0;
                 if (skip == 0)
                 {
-                    if (imageMsg != nullptr && depthMsg != nullptr && poseMsg != nullptr)
+                    if (imageMsg != nullptr && depthMsg != nullptr && poseMsg != nullptr && (!Stereo || stereoMsg != nullptr))
                     {
-                        FrameCallback(imageMsg, depthMsg, poseMsg);
+                        if (Stereo)
+                        {
+                            FrameCallback(imageMsg, stereoMsg, depthMsg, poseMsg);
+                        }
+                        else
+                        {
+                            FrameCallback(imageMsg, depthMsg, poseMsg);
+                        }
                     }
                 }
                 skip++;
@@ -198,5 +283,8 @@ void EvalBase::Run()
     }
 }
 
+template class EvalBase<true>;
+template class EvalBase<false>;
+
 }
 }
diff --git a/src/ros/eval_base.h b/src/ros/eval_base.h
index 4b5c5dc..0888470 100644
--- a/src/ros/eval_base.h
+++ b/src/ros/eval_base.h
@@ -21,6 +21,7 @@ namespace omni_slam
 namespace ros
 {
 
+template <bool Stereo = false>
 class EvalBase
 {
 public:
@@ -42,6 +43,7 @@ class EvalBase
 
 private:
     void FrameCallback(const sensor_msgs::ImageConstPtr &image, const sensor_msgs::ImageConstPtr &depth_image, const geometry_msgs::PoseStamped::ConstPtr &pose);
+    void FrameCallback(const sensor_msgs::ImageConstPtr &image, const sensor_msgs::ImageConstPtr &stereo_image, const sensor_msgs::ImageConstPtr &depth_image, const geometry_msgs::PoseStamped::ConstPtr &pose);
 
     virtual void ProcessFrame(std::unique_ptr<data::Frame> &&frame) = 0;
     virtual void GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data) = 0;
@@ -49,16 +51,21 @@ class EvalBase
     virtual bool GetAttributes(std::map<std::string, std::string> &attributes);
     virtual bool GetAttributes(std::map<std::string, double> &attributes);
     virtual void Visualize(cv_bridge::CvImagePtr &base_img);
+    virtual void Visualize(cv_bridge::CvImagePtr &base_img, cv_bridge::CvImagePtr &base_stereo_img);
 
     image_transport::SubscriberFilter imageSubscriber_;
     image_transport::SubscriberFilter depthImageSubscriber_;
+    image_transport::SubscriberFilter stereoImageSubscriber_;
     message_filters::Subscriber<geometry_msgs::PoseStamped> poseSubscriber_;
-    std::unique_ptr<message_filters::Synchronizer<message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, geometry_msgs::PoseStamped>>> sync_;
+    typedef typename std::conditional<Stereo, message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image, geometry_msgs::PoseStamped>, message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, geometry_msgs::PoseStamped>>::type MessageFilter;
+    std::unique_ptr<message_filters::Synchronizer<MessageFilter>> sync_;
 
     std::string imageTopic_;
+    std::string stereoImageTopic_;
     std::string depthImageTopic_;
     std::string poseTopic_;
     std::map<std::string, double> cameraParams_;
+    Matrix<double, 3, 4> stereoPose_;
 };
 
 }
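
For reference, the conditional MessageFilter typedef above resolves to the
following two approximate-time policies (written out from the same template
arguments):

    // EvalBase<true>:  image + stereo image + depth image + pose
    message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image,
                                                    sensor_msgs::Image, geometry_msgs::PoseStamped>
    // EvalBase<false>: image + depth image + pose (the original three-message policy)
    message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image,
                                                    geometry_msgs::PoseStamped>
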
diff --git a/src/ros/matching_eval.cc b/src/ros/matching_eval.cc
index 5bb89ec..7a71bb9 100644
--- a/src/ros/matching_eval.cc
+++ b/src/ros/matching_eval.cc
@@ -11,7 +11,7 @@ namespace ros
 {
 
 MatchingEval::MatchingEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private)
-    : EvalBase(nh, nh_private)
+    : EvalBase<>(nh, nh_private)
 {
     map<string, double> detectorParams;
     map<string, double> descriptorParams;
@@ -48,7 +48,7 @@ MatchingEval::MatchingEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle
 
 void MatchingEval::InitPublishers()
 {
-    EvalBase::InitPublishers();
+    EvalBase<>::InitPublishers();
 
     string outputTopic;
     nhp_.param("matched_image_topic", outputTopic, string("/omni_slam/matched"));
diff --git a/src/ros/matching_eval.h b/src/ros/matching_eval.h
index 73de0bf..a7e21e2 100644
--- a/src/ros/matching_eval.h
+++ b/src/ros/matching_eval.h
@@ -14,7 +14,7 @@ namespace omni_slam
 namespace ros
 {
 
-class MatchingEval : public EvalBase
+class MatchingEval : public EvalBase<>
 {
 public:
     MatchingEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private);
diff --git a/src/ros/odometry_eval.cc b/src/ros/odometry_eval.cc
index b6f492e..da33de8 100644
--- a/src/ros/odometry_eval.cc
+++ b/src/ros/odometry_eval.cc
@@ -14,8 +14,9 @@ namespace omni_slam
 namespace ros
 {
 
-OdometryEval::OdometryEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private)
-    : TrackingEval(nh, nh_private)
+template <bool Stereo>
+OdometryEval<Stereo>::OdometryEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private)
+    : TrackingEval<Stereo>(nh, nh_private)
 {
     double reprojThresh;
     int iterations;
@@ -24,12 +25,12 @@ OdometryEval::OdometryEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle
     bool logCeres;
     int numCeresThreads;
 
-    nhp_.param("pnp_inlier_threshold", reprojThresh, 10.);
-    nhp_.param("pnp_iterations", iterations, 1000);
-    nhp_.param("bundle_adjustment_max_iterations", baMaxIter, 500);
-    nhp_.param("bundle_adjustment_loss_coefficient", baLossCoeff, 0.1);
-    nhp_.param("bundle_adjustment_logging", logCeres, false);
-    nhp_.param("bundle_adjustment_num_threads", numCeresThreads, 1);
+    this->nhp_.param("pnp_inlier_threshold", reprojThresh, 10.);
+    this->nhp_.param("pnp_iterations", iterations, 1000);
+    this->nhp_.param("bundle_adjustment_max_iterations", baMaxIter, 500);
+    this->nhp_.param("bundle_adjustment_loss_coefficient", baLossCoeff, 0.1);
+    this->nhp_.param("bundle_adjustment_logging", logCeres, false);
+    this->nhp_.param("bundle_adjustment_num_threads", numCeresThreads, 1);
 
     unique_ptr<odometry::PNP> pnp(new odometry::PNP(iterations, reprojThresh, numCeresThreads));
     unique_ptr<optimization::BundleAdjuster> bundleAdjuster(new optimization::BundleAdjuster(baMaxIter, baLossCoeff, numCeresThreads, logCeres));
@@ -37,55 +38,61 @@ OdometryEval::OdometryEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle
     odometryModule_.reset(new module::OdometryModule(pnp, bundleAdjuster));
 }
 
-void OdometryEval::InitPublishers()
+template <bool Stereo>
+void OdometryEval<Stereo>::InitPublishers()
 {
-    TrackingEval::InitPublishers();
+    TrackingEval<Stereo>::InitPublishers();
     string outputTopic;
     string outputGndTopic;
     string outputPathTopic;
     string outputPathGndTopic;
-    nhp_.param("odometry_estimate_topic", outputTopic, string("/omni_slam/odometry"));
-    nhp_.param("odometry_ground_truth_topic", outputGndTopic, string("/omni_slam/odometry_truth"));
-    nhp_.param("path_estimate_topic", outputPathTopic, string("/omni_slam/odometry_path"));
-    nhp_.param("path_ground_truth_topic", outputPathGndTopic, string("/omni_slam/odometry_path_truth"));
-    odometryPublisher_ = nh_.advertise<geometry_msgs::PoseStamped>(outputTopic, 2);
-    odometryGndPublisher_ = nh_.advertise<geometry_msgs::PoseStamped>(outputGndTopic, 2);
-    pathPublisher_ = nh_.advertise<nav_msgs::Path>(outputPathTopic, 2);
-    pathGndPublisher_ = nh_.advertise<nav_msgs::Path>(outputPathGndTopic, 2);
+    this->nhp_.param("odometry_estimate_topic", outputTopic, string("/omni_slam/odometry"));
+    this->nhp_.param("odometry_ground_truth_topic", outputGndTopic, string("/omni_slam/odometry_truth"));
+    this->nhp_.param("path_estimate_topic", outputPathTopic, string("/omni_slam/odometry_path"));
+    this->nhp_.param("path_ground_truth_topic", outputPathGndTopic, string("/omni_slam/odometry_path_truth"));
+    odometryPublisher_ = this->nh_.template advertise<geometry_msgs::PoseStamped>(outputTopic, 2);
+    odometryGndPublisher_ = this->nh_.template advertise<geometry_msgs::PoseStamped>(outputGndTopic, 2);
+    pathPublisher_ = this->nh_.template advertise<nav_msgs::Path>(outputPathTopic, 2);
+    pathGndPublisher_ = this->nh_.template advertise<nav_msgs::Path>(outputPathGndTopic, 2);
 }
 
-void OdometryEval::ProcessFrame(unique_ptr<data::Frame> &&frame)
+template <bool Stereo>
+void OdometryEval<Stereo>::ProcessFrame(unique_ptr<data::Frame> &&frame)
 {
-    trackingModule_->Update(frame);
-    odometryModule_->Update(trackingModule_->GetLandmarks(), *trackingModule_->GetFrames().back());
-    trackingModule_->Redetect();
+    this->trackingModule_->Update(frame);
+    odometryModule_->Update(this->trackingModule_->GetLandmarks(), *this->trackingModule_->GetFrames().back());
+    this->trackingModule_->Redetect();
 }
 
-void OdometryEval::Finish()
+template <bool Stereo>
+void OdometryEval<Stereo>::Finish()
 {
     ROS_INFO("Performing bundle adjustment...");
-    odometryModule_->BundleAdjust(trackingModule_->GetLandmarks());
+    odometryModule_->BundleAdjust(this->trackingModule_->GetLandmarks());
     PublishOdometry();
 }
 
-void OdometryEval::GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data)
+template <bool Stereo>
+void OdometryEval<Stereo>::GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data)
 {
     module::OdometryModule::Stats &stats = odometryModule_->GetStats();
 }
 
-void OdometryEval::Visualize(cv_bridge::CvImagePtr &base_img)
+template <bool Stereo>
+void OdometryEval<Stereo>::Visualize(cv_bridge::CvImagePtr &base_img)
 {
-    TrackingEval::Visualize(base_img);
+    TrackingEval<Stereo>::Visualize(base_img);
     PublishOdometry();
 }
 
-void OdometryEval::PublishOdometry()
+template <bool Stereo>
+void OdometryEval<Stereo>::PublishOdometry()
 {
     geometry_msgs::PoseStamped poseMsg;
     poseMsg.header.frame_id = "map";
-    if (trackingModule_->GetFrames().back()->HasEstimatedPose())
+    if (this->trackingModule_->GetFrames().back()->HasEstimatedPose())
     {
-        const Matrix<double, 3, 4> &pose = trackingModule_->GetFrames().back()->GetEstimatedPose();
+        const Matrix<double, 3, 4> &pose = this->trackingModule_->GetFrames().back()->GetEstimatedPose();
         poseMsg.pose.position.x = pose(0, 3);
         poseMsg.pose.position.y = pose(1, 3);
         poseMsg.pose.position.z = pose(2, 3);
@@ -94,10 +101,10 @@ void OdometryEval::PublishOdometry()
         poseMsg.pose.orientation.y = quat.normalized().y();
         poseMsg.pose.orientation.z = quat.normalized().z();
         poseMsg.pose.orientation.w = quat.normalized().w();
-        poseMsg.header.stamp = ::ros::Time(trackingModule_->GetFrames().back()->GetTime());
+        poseMsg.header.stamp = ::ros::Time(this->trackingModule_->GetFrames().back()->GetTime());
         odometryPublisher_.publish(poseMsg);
     }
-    const Matrix<double, 3, 4> &poseGnd = trackingModule_->GetFrames().back()->GetPose();
+    const Matrix<double, 3, 4> &poseGnd = this->trackingModule_->GetFrames().back()->GetPose();
     poseMsg.pose.position.x = poseGnd(0, 3);
     poseMsg.pose.position.y = poseGnd(1, 3);
     poseMsg.pose.position.z = poseGnd(2, 3);
@@ -106,7 +113,7 @@ void OdometryEval::PublishOdometry()
     poseMsg.pose.orientation.y = quatGnd.normalized().y();
     poseMsg.pose.orientation.z = quatGnd.normalized().z();
     poseMsg.pose.orientation.w = quatGnd.normalized().w();
-    poseMsg.header.stamp = ::ros::Time(trackingModule_->GetFrames().back()->GetTime());
+    poseMsg.header.stamp = ::ros::Time(this->trackingModule_->GetFrames().back()->GetTime());
     odometryGndPublisher_.publish(poseMsg);
 
     nav_msgs::Path path;
@@ -115,7 +122,7 @@ void OdometryEval::PublishOdometry()
     nav_msgs::Path pathGnd;
     pathGnd.header.stamp = ::ros::Time::now();
     pathGnd.header.frame_id = "map";
-    for (const std::unique_ptr<data::Frame> &frame : trackingModule_->GetFrames())
+    for (const std::unique_ptr<data::Frame> &frame : this->trackingModule_->GetFrames())
     {
         if (frame->HasEstimatedPose())
         {
@@ -154,5 +161,8 @@ void OdometryEval::PublishOdometry()
     pathGndPublisher_.publish(pathGnd);
 }
 
+template class OdometryEval<true>;
+template class OdometryEval<false>;
+
 }
 }
diff --git a/src/ros/odometry_eval.h b/src/ros/odometry_eval.h
index 6ca98fc..d86814f 100644
--- a/src/ros/odometry_eval.h
+++ b/src/ros/odometry_eval.h
@@ -12,23 +12,26 @@ namespace omni_slam
 namespace ros
 {
 
-class OdometryEval : public TrackingEval
+template <bool Stereo = false>
+class OdometryEval : public virtual TrackingEval<Stereo>
 {
 public:
     OdometryEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private);
     OdometryEval() : OdometryEval(::ros::NodeHandle(), ::ros::NodeHandle("~")) {}
 
-private:
-    void InitPublishers();
+protected:
+    virtual void InitPublishers();
 
-    void ProcessFrame(std::unique_ptr<data::Frame> &&frame);
-    void Finish();
-    void GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data);
-    void Visualize(cv_bridge::CvImagePtr &base_img);
-    void PublishOdometry();
+    virtual void ProcessFrame(std::unique_ptr<data::Frame> &&frame);
+    virtual void Finish();
+    virtual void GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data);
+    virtual void Visualize(cv_bridge::CvImagePtr &base_img);
 
     std::unique_ptr<module::OdometryModule> odometryModule_;
 
+private:
+    void PublishOdometry();
+
     ::ros::Publisher odometryPublisher_;
     ::ros::Publisher odometryGndPublisher_;
     ::ros::Publisher pathPublisher_;
diff --git a/src/ros/reconstruction_eval.cc b/src/ros/reconstruction_eval.cc
index 2260da7..7bda19b 100644
--- a/src/ros/reconstruction_eval.cc
+++ b/src/ros/reconstruction_eval.cc
@@ -14,8 +14,9 @@ namespace omni_slam
 namespace ros
 {
 
-ReconstructionEval::ReconstructionEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private)
-    : TrackingEval(nh, nh_private)
+template <bool Stereo>
+ReconstructionEval<Stereo>::ReconstructionEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private)
+    : TrackingEval<Stereo>(nh, nh_private)
 {
     double maxReprojError;
     double minTriAngle;
@@ -24,12 +25,12 @@ ReconstructionEval::ReconstructionEval(const ::ros::NodeHandle &nh, const ::ros:
     bool logCeres;
     int numCeresThreads;
 
-    nhp_.param("max_reprojection_error", maxReprojError, 0.0);
-    nhp_.param("min_triangulation_angle", minTriAngle, 1.0);
-    nhp_.param("bundle_adjustment_max_iterations", baMaxIter, 500);
-    nhp_.param("bundle_adjustment_loss_coefficient", baLossCoeff, 0.1);
-    nhp_.param("bundle_adjustment_logging", logCeres, false);
-    nhp_.param("bundle_adjustment_num_threads", numCeresThreads, 1);
+    this->nhp_.param("max_reprojection_error", maxReprojError, 0.0);
+    this->nhp_.param("min_triangulation_angle", minTriAngle, 1.0);
+    this->nhp_.param("bundle_adjustment_max_iterations", baMaxIter, 500);
+    this->nhp_.param("bundle_adjustment_loss_coefficient", baLossCoeff, 0.1);
+    this->nhp_.param("bundle_adjustment_logging", logCeres, false);
+    this->nhp_.param("bundle_adjustment_num_threads", numCeresThreads, 1);
 
     unique_ptr<reconstruction::Triangulator> triangulator(new reconstruction::Triangulator(maxReprojError, minTriAngle));
     unique_ptr<optimization::BundleAdjuster> bundleAdjuster(new optimization::BundleAdjuster(baMaxIter, baLossCoeff, numCeresThreads, logCeres));
@@ -37,25 +38,28 @@ ReconstructionEval::ReconstructionEval(const ::ros::NodeHandle &nh, const ::ros:
     reconstructionModule_.reset(new module::ReconstructionModule(triangulator, bundleAdjuster));
 }
 
-void ReconstructionEval::InitPublishers()
+template <bool Stereo>
+void ReconstructionEval<Stereo>::InitPublishers()
 {
-    TrackingEval::InitPublishers();
+    TrackingEval<Stereo>::InitPublishers();
     string outputTopic;
-    nhp_.param("point_cloud_topic", outputTopic, string("/omni_slam/reconstructed"));
-    pointCloudPublisher_ = nh_.advertise<sensor_msgs::PointCloud2>(outputTopic, 2);
+    this->nhp_.param("point_cloud_topic", outputTopic, string("/omni_slam/reconstructed"));
+    pointCloudPublisher_ = this->nh_.template advertise<sensor_msgs::PointCloud2>(outputTopic, 2);
 }
 
-void ReconstructionEval::ProcessFrame(unique_ptr<data::Frame> &&frame)
+template <bool Stereo>
+void ReconstructionEval<Stereo>::ProcessFrame(unique_ptr<data::Frame> &&frame)
 {
-    trackingModule_->Update(frame);
-    reconstructionModule_->Update(trackingModule_->GetLandmarks());
-    trackingModule_->Redetect();
+    this->trackingModule_->Update(frame);
+    reconstructionModule_->Update(this->trackingModule_->GetLandmarks());
+    this->trackingModule_->Redetect();
 }
 
-void ReconstructionEval::Finish()
+template <bool Stereo>
+void ReconstructionEval<Stereo>::Finish()
 {
     ROS_INFO("Performing bundle adjustment...");
-    reconstructionModule_->BundleAdjust(trackingModule_->GetLandmarks());
+    reconstructionModule_->BundleAdjust(this->trackingModule_->GetLandmarks());
 
     pcl::PointCloud<pcl::PointXYZRGB> cloud;
     cv::Mat noarr;
@@ -67,12 +71,14 @@ void ReconstructionEval::Finish()
     pointCloudPublisher_.publish(msg);
 }
 
-void ReconstructionEval::GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data)
+template <bool Stereo>
+void ReconstructionEval<Stereo>::GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data)
 {
     module::ReconstructionModule::Stats &stats = reconstructionModule_->GetStats();
 }
 
-void ReconstructionEval::Visualize(cv_bridge::CvImagePtr &base_img)
+template <bool Stereo>
+void ReconstructionEval<Stereo>::Visualize(cv_bridge::CvImagePtr &base_img)
 {
     pcl::PointCloud<pcl::PointXYZRGB> cloud;
     reconstructionModule_->Visualize(base_img->image, cloud);
@@ -81,8 +87,11 @@ void ReconstructionEval::Visualize(cv_bridge::CvImagePtr &base_img)
     msg.header.frame_id = "map";
     msg.header.stamp = ::ros::Time::now();
     pointCloudPublisher_.publish(msg);
-    TrackingEval::Visualize(base_img);
+    TrackingEval<Stereo>::Visualize(base_img);
 }
 
+template class ReconstructionEval<true>;
+template class ReconstructionEval<false>;
+
 }
 }
diff --git a/src/ros/reconstruction_eval.h b/src/ros/reconstruction_eval.h
index 37cbc93..effc9f9 100644
--- a/src/ros/reconstruction_eval.h
+++ b/src/ros/reconstruction_eval.h
@@ -12,23 +12,25 @@ namespace omni_slam
 namespace ros
 {
 
-class ReconstructionEval : public TrackingEval
+template <bool Stereo = false>
+class ReconstructionEval : public virtual TrackingEval<Stereo>
 {
 public:
     ReconstructionEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private);
     ReconstructionEval() : ReconstructionEval(::ros::NodeHandle(), ::ros::NodeHandle("~")) {}
 
-private:
-    void InitPublishers();
-
-    void ProcessFrame(std::unique_ptr<data::Frame> &&frame);
-    void Finish();
-    void GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data);
-    void Visualize(cv_bridge::CvImagePtr &base_img);
+protected:
+    virtual void InitPublishers();
 
-    ::ros::Publisher pointCloudPublisher_;
+    virtual void ProcessFrame(std::unique_ptr<data::Frame> &&frame);
+    virtual void Finish();
+    virtual void GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data);
+    virtual void Visualize(cv_bridge::CvImagePtr &base_img);
 
     std::unique_ptr<module::ReconstructionModule> reconstructionModule_;
+
+private:
+    ::ros::Publisher pointCloudPublisher_;
 };
 
 }
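
All of the eval classes in this patch inherit with `public virtual`, so a class that later combines two of them would share a single TrackingEval<Stereo> (and EvalBase<Stereo>) subobject rather than carrying two copies. The combined class below is hypothetical and only illustrates how the virtual bases resolve that diamond; names are illustrative, not part of the patch:

    #include <iostream>

    // Mirrors the virtual-inheritance pattern used by the eval classes.
    template <bool Stereo>
    struct TrackingEvalSketch
    {
        int frames_ = 0;  // stands in for shared tracking state
    };

    template <bool Stereo>
    struct OdometryEvalSketch : virtual TrackingEvalSketch<Stereo> {};

    template <bool Stereo>
    struct ReconstructionEvalSketch : virtual TrackingEvalSketch<Stereo> {};

    // A hypothetical combined evaluator: exactly one TrackingEvalSketch subobject.
    template <bool Stereo>
    struct SlamEvalSketch : OdometryEvalSketch<Stereo>, ReconstructionEvalSketch<Stereo> {};

    int main()
    {
        SlamEvalSketch<false> eval;
        eval.frames_ = 3;                 // unambiguous thanks to the virtual base
        std::cout << eval.frames_ << "\n";
    }

Note that with virtual bases the most-derived class is responsible for constructing the shared base, which is why the real classes forward the two NodeHandle arguments explicitly.
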
diff --git a/src/ros/stereo_eval.cc b/src/ros/stereo_eval.cc
new file mode 100644
index 0000000..230f78f
--- /dev/null
+++ b/src/ros/stereo_eval.cc
@@ -0,0 +1,60 @@
+#include "stereo_eval.h"
+
+#include "stereo/lk_stereo_matcher.h"
+#include "module/tracking_module.h"
+
+using namespace std;
+
+namespace omni_slam
+{
+namespace ros
+{
+
+StereoEval::StereoEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private)
+    : TrackingEval<true>(nh, nh_private)
+{
+    int windowSize;
+    int numScales;
+    double errThresh;
+    double epiThresh;
+
+    this->nhp_.param("stereo_matcher_window_size", windowSize, 256);
+    this->nhp_.param("stereo_matcher_num_scales", numScales, 5);
+    this->nhp_.param("stereo_matcher_error_threshold", errThresh, 20.);
+    this->nhp_.param("stereo_matcher_epipolar_threshold", epiThresh, 0.005);
+
+    unique_ptr<stereo::StereoMatcher> stereo(new stereo::LKStereoMatcher(epiThresh, windowSize, numScales, errThresh));
+
+    stereoModule_.reset(new module::StereoModule(stereo));
+}
+
+void StereoEval::InitPublishers()
+{
+    string outputTopic;
+    this->nhp_.param("stereo_matched_image_topic", outputTopic, string("/omni_slam/stereo_matched"));
+    matchedImagePublisher_ = this->imageTransport_.advertise(outputTopic, 2);
+}
+
+void StereoEval::ProcessFrame(unique_ptr<data::Frame> &&frame)
+{
+    this->trackingModule_->GetLandmarks().clear();
+    this->trackingModule_->Update(frame);
+    this->trackingModule_->Redetect();
+    stereoModule_->Update(*this->trackingModule_->GetFrames().back(), this->trackingModule_->GetLandmarks());
+}
+
+void StereoEval::GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data)
+{
+    module::StereoModule::Stats &stats = stereoModule_->GetStats();
+}
+
+void StereoEval::Visualize(cv_bridge::CvImagePtr &base_img, cv_bridge::CvImagePtr &base_stereo_img)
+{
+    stereoModule_->Visualize(base_img->image, base_stereo_img->image);
+    matchedImagePublisher_.publish(base_img->toImageMsg());
+}
+
+}
+}
+
+
diff --git a/src/ros/stereo_eval.h b/src/ros/stereo_eval.h
new file mode 100644
index 0000000..7fc629f
--- /dev/null
+++ b/src/ros/stereo_eval.h
@@ -0,0 +1,38 @@
+#ifndef _STEREO_EVAL_H_
+#define _STEREO_EVAL_H_
+
+#include "tracking_eval.h"
+#include "module/stereo_module.h"
+
+#include <image_transport/image_transport.h>
+#include <ros/ros.h>
+#include <vector>
+
+namespace omni_slam
+{
+namespace ros
+{
+
+class StereoEval : public virtual TrackingEval<true>
+{
+public:
+    StereoEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private);
+    StereoEval() : StereoEval(::ros::NodeHandle(), ::ros::NodeHandle("~")) {}
+
+protected:
+    virtual void InitPublishers();
+
+    virtual void ProcessFrame(std::unique_ptr<data::Frame> &&frame);
+    virtual void GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data);
+    virtual void Visualize(cv_bridge::CvImagePtr &base_img, cv_bridge::CvImagePtr &base_stereo_img);
+
+    std::unique_ptr<module::StereoModule> stereoModule_;
+
+private:
+    image_transport::Publisher matchedImagePublisher_;
+};
+
+}
+}
+
+#endif /* _STEREO_EVAL_H_ */
diff --git a/src/ros/tracking_eval.cc b/src/ros/tracking_eval.cc
index be3fa32..dbb7976 100644
--- a/src/ros/tracking_eval.cc
+++ b/src/ros/tracking_eval.cc
@@ -11,8 +11,9 @@ namespace omni_slam
 namespace ros
 {
 
-TrackingEval::TrackingEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private)
-    : EvalBase(nh, nh_private)
+template <bool Stereo>
+TrackingEval<Stereo>::TrackingEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private)
+    : EvalBase<Stereo>(nh, nh_private)
 {
     string detectorType;
     int trackerWindowSize;
@@ -24,15 +25,15 @@ TrackingEval::TrackingEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle
     map<string, double> detectorParams;
     int minFeaturesRegion;
 
-    nhp_.param("detector_type", detectorType, string("GFTT"));
-    nhp_.param("tracker_window_size", trackerWindowSize, 128);
-    nhp_.param("tracker_num_scales", trackerNumScales, 4);
-    nhp_.param("tracker_checker_epipolar_threshold", fivePointThreshold, 0.01745240643);
-    nhp_.param("tracker_checker_iterations", fivePointRansacIterations, 1000);
-    nhp_.param("tracker_delta_pixel_error_threshold", trackerDeltaPixelErrorThresh, 5.0);
-    nhp_.param("tracker_error_threshold", trackerErrorThresh, 20.);
-    nhp_.param("min_features_per_region", minFeaturesRegion, 5);
-    nhp_.getParam("detector_parameters", detectorParams);
+    this->nhp_.param("detector_type", detectorType, string("GFTT"));
+    this->nhp_.param("tracker_window_size", trackerWindowSize, 128);
+    this->nhp_.param("tracker_num_scales", trackerNumScales, 4);
+    this->nhp_.param("tracker_checker_epipolar_threshold", fivePointThreshold, 0.01745240643);
+    this->nhp_.param("tracker_checker_iterations", fivePointRansacIterations, 1000);
+    this->nhp_.param("tracker_delta_pixel_error_threshold", trackerDeltaPixelErrorThresh, 5.0);
+    this->nhp_.param("tracker_error_threshold", trackerErrorThresh, 20.);
+    this->nhp_.param("min_features_per_region", minFeaturesRegion, 5);
+    this->nhp_.getParam("detector_parameters", detectorParams);
 
     unique_ptr<feature::Detector> detector;
     if (feature::Detector::IsDetectorTypeValid(detectorType))
@@ -51,22 +52,25 @@ TrackingEval::TrackingEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle
     trackingModule_.reset(new module::TrackingModule(detector, tracker, checker, minFeaturesRegion));
 }
 
-void TrackingEval::InitPublishers()
+template <bool Stereo>
+void TrackingEval<Stereo>::InitPublishers()
 {
-    EvalBase::InitPublishers();
+    EvalBase<Stereo>::InitPublishers();
 
     string outputTopic;
-    nhp_.param("tracked_image_topic", outputTopic, string("/omni_slam/tracked"));
-    trackedImagePublisher_ = imageTransport_.advertise(outputTopic, 2);
+    this->nhp_.param("tracked_image_topic", outputTopic, string("/omni_slam/tracked"));
+    trackedImagePublisher_ = this->imageTransport_.advertise(outputTopic, 2);
 }
 
-void TrackingEval::ProcessFrame(unique_ptr<data::Frame> &&frame)
+template <bool Stereo>
+void TrackingEval<Stereo>::ProcessFrame(unique_ptr<data::Frame> &&frame)
 {
     trackingModule_->Update(frame);
     trackingModule_->Redetect();
 }
 
-void TrackingEval::GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data)
+template <bool Stereo>
+void TrackingEval<Stereo>::GetResultsData(std::map<std::string, std::vector<std::vector<double>>> &data)
 {
     module::TrackingModule::Stats &stats = trackingModule_->GetStats();
     data["failures"] = {stats.failureRadDists};
@@ -81,11 +85,15 @@ void TrackingEval::GetResultsData(std::map<std::string, std::vector<std::vector<
     data["track_lengths"] = {vector<double>(stats.trackLengths.begin(), stats.trackLengths.end())};
 }
 
-void TrackingEval::Visualize(cv_bridge::CvImagePtr &base_img)
+template <bool Stereo>
+void TrackingEval<Stereo>::Visualize(cv_bridge::CvImagePtr &base_img)
 {
     trackingModule_->Visualize(base_img->image);
     trackedImagePublisher_.publish(base_img->toImageMsg());
 }
 
+template class TrackingEval<true>;
+template class TrackingEval<false>;
+
 }
 }
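
The `template class TrackingEval<true>;` / `template class TrackingEval<false>;` lines (and the matching pairs in odometry_eval.cc and reconstruction_eval.cc) are explicit instantiations: they let the member definitions stay in the .cc file while every other translation unit only includes the header, at the cost of having to enumerate the supported Stereo values. A standalone sketch of the idiom with illustrative names:

    // widget.h -- declaration only; member definitions live in widget.cc
    template <bool Stereo>
    class Widget
    {
    public:
        int Channels() const;
    };

    // widget.cc -- definitions plus explicit instantiations
    template <bool Stereo>
    int Widget<Stereo>::Channels() const
    {
        return Stereo ? 2 : 1;
    }

    // Without these two lines, users of widget.h would hit undefined-symbol link
    // errors, since the definition of Channels() is not visible to them.
    template class Widget<true>;
    template class Widget<false>;
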
diff --git a/src/ros/tracking_eval.h b/src/ros/tracking_eval.h
index f600cfc..d7a8226 100644
--- a/src/ros/tracking_eval.h
+++ b/src/ros/tracking_eval.h
@@ -14,7 +14,8 @@ namespace omni_slam
 namespace ros
 {
 
-class TrackingEval : public EvalBase
+template <bool Stereo = false>
+class TrackingEval : public virtual EvalBase<Stereo>
 {
 public:
     TrackingEval(const ::ros::NodeHandle &nh, const ::ros::NodeHandle &nh_private);
diff --git a/src/stereo/lk_stereo_matcher.cc b/src/stereo/lk_stereo_matcher.cc
new file mode 100644
index 0000000..0bed827
--- /dev/null
+++ b/src/stereo/lk_stereo_matcher.cc
@@ -0,0 +1,45 @@
+#include "lk_stereo_matcher.h"
+
+#include <cmath>
+
+namespace omni_slam
+{
+namespace stereo
+{
+
+LKStereoMatcher::LKStereoMatcher(double epipolar_thresh, int window_size, int num_scales, float err_thresh, int term_count, double term_eps)
+    : StereoMatcher(epipolar_thresh),
+    windowSize_(window_size / pow(2, num_scales), window_size / pow(2, num_scales)),
+    numScales_(num_scales),
+    errThresh_(err_thresh),
+    termCrit_(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, term_count, term_eps)
+{
+}
+
+void LKStereoMatcher::FindMatches(const cv::Mat &image1, const cv::Mat &image2, const std::vector<cv::KeyPoint> &pts1, std::vector<cv::KeyPoint> &pts2, std::vector<int> &matchedIndices) const
+{
+    std::vector<cv::Point2f> pointsToTrack;
+    pointsToTrack.reserve(pts1.size());
+    for (const cv::KeyPoint &kpt : pts1)
+    {
+        pointsToTrack.push_back(kpt.pt);
+    }
+    std::vector<cv::Point2f> results;
+    std::vector<unsigned char> status;
+    std::vector<float> err;
+    cv::calcOpticalFlowPyrLK(image1, image2, pointsToTrack, results, status, err, windowSize_, numScales_, termCrit_, 0);
+    pts2.clear();
+    matchedIndices.clear();
+    for (int i = 0; i < results.size(); i++)
+    {
+        cv::KeyPoint kpt(results[i], pts1[i].size);
+        pts2.push_back(kpt);
+        if (status[i] == 1 && err[i] <= errThresh_)
+        {
+            matchedIndices.push_back(i);
+        }
+    }
+}
+
+}
+}
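
LKStereoMatcher delegates the left-to-right feature search to OpenCV's pyramidal Lucas-Kanade tracker; note that the per-level window handed to cv::calcOpticalFlowPyrLK is the configured window size divided by 2^num_scales, so the configured value roughly describes the search extent at full resolution. A minimal standalone use of the same OpenCV call and the same status/error filtering (image paths and point coordinates are placeholders):

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/video/tracking.hpp>
    #include <vector>

    int main()
    {
        // Placeholder paths; any stereo pair works for a smoke test.
        cv::Mat left = cv::imread("left.png", cv::IMREAD_GRAYSCALE);
        cv::Mat right = cv::imread("right.png", cv::IMREAD_GRAYSCALE);
        if (left.empty() || right.empty())
        {
            return 1;
        }

        std::vector<cv::Point2f> leftPts{{100.f, 120.f}, {240.f, 80.f}};
        std::vector<cv::Point2f> rightPts;
        std::vector<unsigned char> status;
        std::vector<float> err;

        const int windowSize = 256;
        const int numScales = 5;
        const double errThresh = 20.;
        // Same scaling as LKStereoMatcher: window size per pyramid level.
        cv::Size win(windowSize / (1 << numScales), windowSize / (1 << numScales));
        cv::TermCriteria crit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 50, 0.01);

        cv::calcOpticalFlowPyrLK(left, right, leftPts, rightPts, status, err, win, numScales, crit, 0);

        // Keep only matches that converged with an acceptable residual, as the matcher does.
        int matched = 0;
        for (size_t i = 0; i < rightPts.size(); i++)
        {
            if (status[i] == 1 && err[i] <= errThresh)
            {
                matched++;
            }
        }
        return matched;
    }
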
diff --git a/src/stereo/lk_stereo_matcher.h b/src/stereo/lk_stereo_matcher.h
new file mode 100644
index 0000000..2db6fd3
--- /dev/null
+++ b/src/stereo/lk_stereo_matcher.h
@@ -0,0 +1,28 @@
+#ifndef _LK_STEREO_MATCHER_H_
+#define _LK_STEREO_MATCHER_H_
+
+#include "stereo_matcher.h"
+
+namespace omni_slam
+{
+namespace stereo
+{
+
+class LKStereoMatcher : public StereoMatcher
+{
+public:
+    LKStereoMatcher(double epipolar_thresh, int window_size, int num_scales, float err_thresh = 20., int term_count = 50, double term_eps = 0.01);
+
+private:
+    void FindMatches(const cv::Mat &image1, const cv::Mat &image2, const std::vector<cv::KeyPoint> &pts1, std::vector<cv::KeyPoint> &pts2, std::vector<int> &matchedIndices) const;
+
+    const cv::Size windowSize_;
+    const int numScales_;
+    const float errThresh_;
+    cv::TermCriteria termCrit_;
+};
+
+}
+}
+
+#endif /* _LK_STEREO_MATCHER_H_ */
diff --git a/src/stereo/stereo_matcher.cc b/src/stereo/stereo_matcher.cc
new file mode 100644
index 0000000..ce6f37e
--- /dev/null
+++ b/src/stereo/stereo_matcher.cc
@@ -0,0 +1,91 @@
+#include "stereo_matcher.h"
+
+#include "util/tf_util.h"
+
+namespace omni_slam
+{
+namespace stereo
+{
+
+StereoMatcher::StereoMatcher(double epipolar_thresh)
+    : epipolarThresh_(epipolar_thresh)
+{
+}
+
+int StereoMatcher::Match(data::Frame &frame, std::vector<data::Landmark> &landmarks) const
+{
+    if (!frame.HasStereoImage())
+    {
+        return 0;
+    }
+    std::vector<cv::KeyPoint> pointsToMatch;
+    std::vector<Vector3d> bearings1;
+    std::vector<int> origInx;
+    for (int i = 0; i < landmarks.size(); i++)
+    {
+        data::Landmark &landmark = landmarks[i];
+        if (landmark.HasEstimatedPosition())
+        {
+            continue;
+        }
+        const data::Feature *feat = landmark.GetObservationByFrameID(frame.GetID());
+        if (feat != nullptr)
+        {
+            pointsToMatch.push_back(feat->GetKeypoint());
+            bearings1.push_back(feat->GetBearing().normalized());
+            origInx.push_back(i);
+        }
+    }
+    if (pointsToMatch.size() == 0)
+    {
+        return 0;
+    }
+
+    std::vector<cv::KeyPoint> matchedPoints;
+    std::vector<int> matchedIndices;
+    FindMatches(frame.GetImage(), frame.GetStereoImage(), pointsToMatch, matchedPoints, matchedIndices);
+
+    Matrix<double, 3, 4> I;
+    I.block<3, 3>(0, 0) = Matrix3d::Identity();
+    I.block<3, 1>(0, 3) = Vector3d::Zero();
+    Matrix3d E = util::TFUtil::GetEssentialMatrixFromPoses(I, frame.GetStereoPose());
+    int good = 0;
+    #pragma omp parallel for reduction(+:good)
+    for (auto it = matchedIndices.begin(); it < matchedIndices.end(); it++)
+    {
+        int inx = *it;
+        Vector3d &bearing1 = bearings1[inx];
+        data::Feature feat(frame, matchedPoints[inx]);
+        Vector3d bearing2 = feat.GetBearing().normalized();
+
+        RowVector3d epiplane1 = bearing2.transpose() * E;
+        epiplane1.normalize();
+        double epiErr1 = std::abs(epiplane1 * bearing1);
+        Vector3d epiplane2 = E * bearing1;
+        epiplane2.normalize();
+        double epiErr2 = std::abs(bearing2.transpose() * epiplane2);
+        if (epiErr1 < epipolarThresh_ && epiErr2 < epipolarThresh_)
+        {
+            Matrix<double, 3, 4> framePose = frame.HasEstimatedPose() ? frame.GetEstimatedPose() : frame.GetPose();
+            landmarks[origInx[inx]].SetEstimatedPosition(util::TFUtil::TransformPoint(framePose, TriangulateDLT(bearing1, bearing2, I, frame.GetStereoPose())));
+            landmarks[origInx[inx]].AddStereoObservation(feat);
+            good++;
+        }
+    }
+    return good;
+}
+
+Vector3d StereoMatcher::TriangulateDLT(const Vector3d &x1, const Vector3d &x2, const Matrix<double, 3, 4> &pose1, const Matrix<double, 3, 4> &pose2) const
+{
+    Matrix4d design;
+    design.row(0) = x1(0) * pose1.row(2) - x1(2) * pose1.row(0);
+    design.row(1) = x1(1) * pose1.row(2) - x1(2) * pose1.row(1);
+    design.row(2) = x2(0) * pose2.row(2) - x2(2) * pose2.row(0);
+    design.row(3) = x2(1) * pose2.row(2) - x2(2) * pose2.row(1);
+
+    Eigen::JacobiSVD<Matrix4d> svd(design, Eigen::ComputeFullV);
+    return svd.matrixV().col(3).hnormalized();
+}
+
+}
+}
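
TriangulateDLT stacks the standard cross-product constraints x × (P X) = 0 for both views and takes the null space of the 4x4 design matrix via SVD. The following self-contained sanity check replicates that formulation on a synthetic point with a known answer; it is a test sketch, not code from the patch:

    #include <Eigen/Dense>
    #include <iostream>

    using namespace Eigen;

    // Same DLT formulation as StereoMatcher::TriangulateDLT: two rows per view,
    // smallest right singular vector gives the homogeneous point.
    static Vector3d TriangulateDLT(const Vector3d &x1, const Vector3d &x2,
                                   const Matrix<double, 3, 4> &pose1,
                                   const Matrix<double, 3, 4> &pose2)
    {
        Matrix4d design;
        design.row(0) = x1(0) * pose1.row(2) - x1(2) * pose1.row(0);
        design.row(1) = x1(1) * pose1.row(2) - x1(2) * pose1.row(1);
        design.row(2) = x2(0) * pose2.row(2) - x2(2) * pose2.row(0);
        design.row(3) = x2(1) * pose2.row(2) - x2(2) * pose2.row(1);
        JacobiSVD<Matrix4d> svd(design, ComputeFullV);
        return svd.matrixV().col(3).hnormalized();
    }

    int main()
    {
        // First camera at the origin, second offset by a 0.2 m baseline.
        Matrix<double, 3, 4> P1, P2;
        P1 << Matrix3d::Identity(), Vector3d::Zero();
        P2 << Matrix3d::Identity(), Vector3d(-0.2, 0, 0);

        Vector3d X(0.5, -0.3, 4.0);                        // ground-truth point
        Vector3d x1 = (P1 * X.homogeneous()).normalized(); // bearing in camera 1
        Vector3d x2 = (P2 * X.homogeneous()).normalized(); // bearing in camera 2

        Vector3d Xhat = TriangulateDLT(x1, x2, P1, P2);
        std::cout << "error: " << (Xhat - X).norm() << "\n"; // ~0 for noise-free input
    }
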
diff --git a/src/stereo/stereo_matcher.h b/src/stereo/stereo_matcher.h
new file mode 100644
index 0000000..7622500
--- /dev/null
+++ b/src/stereo/stereo_matcher.h
@@ -0,0 +1,31 @@
+#ifndef _STEREO_MATCHER_H_
+#define _STEREO_MATCHER_H_
+
+#include "data/landmark.h"
+#include <Eigen/Dense>
+
+using namespace Eigen;
+
+namespace omni_slam
+{
+namespace stereo
+{
+
+class StereoMatcher
+{
+public:
+    StereoMatcher(double epipolar_thresh);
+
+    int Match(data::Frame &frame, std::vector<data::Landmark> &landmarks) const;
+
+private:
+    virtual void FindMatches(const cv::Mat &image1, const cv::Mat &image2, const std::vector<cv::KeyPoint> &pts1, std::vector<cv::KeyPoint> &pts2, std::vector<int> &matchedIndices) const = 0;
+    Vector3d TriangulateDLT(const Vector3d &x1, const Vector3d &x2, const Matrix<double, 3, 4> &pose1, const Matrix<double, 3, 4> &pose2) const;
+
+    double epipolarThresh_;
+};
+
+}
+}
+
+#endif /* _STEREO_MATCHER_H_ */
diff --git a/src/util/tf_util.h b/src/util/tf_util.h
index 66f75a8..a42384c 100644
--- a/src/util/tf_util.h
+++ b/src/util/tf_util.h
@@ -67,6 +67,27 @@ inline Matrix<T, 3, 4> QuaternionTranslationToPoseMatrix(Quaternion<T> quat, Mat
     return pose;
 }
 
+template <typename T>
+inline Matrix<T, 3, 4> GetRelativeTransform(Matrix<T, 3, 4> pose1, Matrix<T, 3, 4> pose2)
+{
+    Matrix<T, 3, 4> rel;
+    rel.template block<3, 3>(0, 0) = GetRotationFromPoseMatrix(pose2) * GetRotationFromPoseMatrix(pose1).transpose();
+    rel.template block<3, 1>(0, 3) = pose2.template block<3, 1>(0, 3) - rel.template block<3, 3>(0, 0) * pose1.template block<3, 1>(0, 3);
+    return rel;
+}
+
+template <typename T>
+inline Matrix<T, 3, 3> GetEssentialMatrixFromPoses(Matrix<T, 3, 4> pose1, Matrix<T, 3, 4> pose2)
+{
+    Matrix<T, 3, 4> rel = GetRelativeTransform(pose1, pose2);
+    Matrix<T, 3, 1> t = rel.template block<3, 1>(0, 3);
+    Matrix<T, 3, 3> tx;
+    tx << 0, -t(2), t(1),
+       t(2), 0, -t(0),
+       -t(1), t(0), 0;
+    return tx * GetRotationFromPoseMatrix(rel);
+}
+
 }
 }
 }
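
GetEssentialMatrixFromPoses composes the relative transform and returns E = [t]x R, and StereoMatcher::Match scores candidate matches by how well the two bearings satisfy the resulting epipolar constraint. A small standalone check of that algebraic identity; the pose convention below (X2 = R X1 + t mapping camera-1 points into camera 2) is an assumption for this sketch and may differ from the convention used by GetRelativeTransform:

    #include <Eigen/Dense>
    #include <iostream>

    using namespace Eigen;

    // Skew-symmetric (cross-product) matrix, as used to form E = [t]_x * R.
    static Matrix3d Skew(const Vector3d &t)
    {
        Matrix3d tx;
        tx << 0, -t(2), t(1),
              t(2), 0, -t(0),
              -t(1), t(0), 0;
        return tx;
    }

    int main()
    {
        // Assumed convention for this sketch only: X2 = R * X1 + t.
        Matrix3d R = AngleAxisd(0.1, Vector3d::UnitY()).toRotationMatrix();
        Vector3d t(-0.2, 0.0, 0.01);

        Matrix3d E = Skew(t) * R;

        // A synthetic 3D point observed as bearings in both cameras.
        Vector3d X1(0.4, -0.3, 3.0);
        Vector3d X2 = R * X1 + t;
        Vector3d b1 = X1.normalized();
        Vector3d b2 = X2.normalized();

        // Epipolar constraint: ~0 for a true correspondence, which is what the
        // matcher thresholds (per normalized epipolar plane) against epipolar_thresh.
        std::cout << "residual: " << b2.transpose() * E * b1 << "\n";
    }
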