diff --git a/CHANGELOG.md b/CHANGELOG.md index a57124c3..b399041c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,24 @@ Change log ========== +2.2.0 (2023-11-21) +================== + +* API changes: + * :warning: All topics `measured_cp` and `setpoint_cp` use `PoseStamped` instead of `TransformStamped` + * `video.md` and video launch files moved to newly created `dvrk_video` package (still part of this repository) +* New features: + * Support for Si PSMs, ECM and SUJ + * All video related files (launch and md) moved to ROS package `dvrk_video` to avoid build dependencies on the full dVRK stack + * Added `vcs` files (replacing `wstool`) for dVRK 2.1, 2.2 and devel + * Added command line options to expose more IO and PID topics + * `dvrk_console_json` can locate `ros-io-.json` files using internal path, no need to specify the full path + * All Python examples updated to use newly introduced `crtk.ral` (ROS Abstraction Layer) and `crtk.check_connections` + * Added `dvrk_reset_teleoperation.py` to reposition MTMs and PSMs to better position between teleoperation tasks, very useful for user studies! + * `dvrk_bag_replay.py`: can now replay using `setpoint_jp` or `setpoint_cp`, fixed timing, doesn't use full `dvrk.psm` but creates a light class using `crtk.utils`, record and send joint velocities for better trajectory following +* Bug fixes: + * Many + 2.1.0 (2021-08-10) ================== diff --git a/README.md b/README.md index fea5cee8..3dc4b46f 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,9 @@ This repository has code related to daVinci Research Kit (dVRK) ROS packages. See https://github.com/jhu-dvrk/sawIntuitiveResearchKit/wiki # Install -We use the catkin build tools, NOT `catkin_make`. See download and compilation instructions: https://github.com/jhu-dvrk/sawIntuitiveResearchKit/wiki/CatkinBuild +We use the catkin build tools, NOT catkin_make. 
Please don't use catkin_make +* Download and compile the cisst libraries and SAW components for the dVRK, see the dVRK tutorial wiki: https://github.com/jhu-cisst/cisst/wiki/Compiling-cisst-and-SAW-with-CMake#13-building-using-catkin-build-tools-for-ros +* Download and compile dvrk-ros: https://github.com/jhu-dvrk/sawIntuitiveResearchKit/wiki/CatkinBuild # List of Packages: * `dvrk_robot` **[maintained]** diff --git a/dvrk-2.1.vcs b/dvrk-2.1.vcs new file mode 100644 index 00000000..93ff69de --- /dev/null +++ b/dvrk-2.1.vcs @@ -0,0 +1,53 @@ +repositories: + cisst-saw/cisstNetlib: + type: git + url: https://github.com/jhu-cisst/cisstNetlib.git + version: 3.0.0 + cisst-saw/cisst: + type: git + url: https://github.com/jhu-cisst/cisst.git + version: 1.1.0 + cisst-saw/cisst-ros: + type: git + url: https://github.com/jhu-cisst/cisst-ros.git + version: 2.0.0 + cisst-saw/sawKeyboard: + type: git + url: https://github.com/jhu-saw/sawKeyboard.git + version: 1.3.0 + cisst-saw/sawTextToSpeech: + type: git + url: https://github.com/jhu-saw/sawTextToSpeech.git + version: 1.3.0 + cisst-saw/sawRobotIO1394: + type: git + url: https://github.com/jhu-saw/sawRobotIO1394.git + version: 2.1.0 + cisst-saw/sawControllers: + type: git + url: https://github.com/jhu-saw/sawControllers.git + version: 2.0.0 + cisst-saw/sawIntuitiveResearchKit: + type: git + url: https://github.com/jhu-dvrk/sawIntuitiveResearchKit.git + version: 2.1.0 + crtk/crtk_msgs: + type: git + url: https://github.com/collaborative-robotics/crtk_msgs.git + version: 1.0.0 + crtk/crtk_python_client: + type: git + url: https://github.com/collaborative-robotics/crtk_python_client.git + version: 1.1.0 + crtk/crtk_matlab_client: + type: git + url: https://github.com/collaborative-robotics/crtk_matlab_client.git + version: 1.1.0 + dvrk/dvrk-ros: + type: git + url: https://github.com/jhu-dvrk/dvrk-ros.git + version: 2.1.0 + dvrk/dvrk-gravity-compensation: + type: git + url: https://github.com/jhu-dvrk/dvrk-gravity-compensation.git + version: 2.0.0 diff --git a/dvrk-2.2.vcs b/dvrk-2.2.vcs new file mode 100644 index 00000000..9bce5bb0 --- /dev/null +++ b/dvrk-2.2.vcs @@ -0,0 +1,61 @@ +repositories: + cisst-saw/cisstNetlib: + type: git + url: https://github.com/jhu-cisst/cisstNetlib.git + version: 3.1.0 + cisst-saw/cisst: + type: git + url: https://github.com/jhu-cisst/cisst.git + version: 1.2.0 + cisst-saw/cisst-ros: + type: git + url: https://github.com/jhu-cisst/cisst-ros.git + version: 2.1.0 + cisst-saw/sawKeyboard: + type: git + url: https://github.com/jhu-saw/sawKeyboard.git + version: 1.4.0 + cisst-saw/sawTextToSpeech: + type: git + url: https://github.com/jhu-saw/sawTextToSpeech.git + version: 1.4.0 + cisst-saw/sawRobotIO1394: + type: git + url: https://github.com/jhu-saw/sawRobotIO1394.git + version: 2.2.0 + cisst-saw/sawControllers: + type: git + url: https://github.com/jhu-saw/sawControllers.git + version: 2.1.0 + cisst-saw/sawIntuitiveResearchKit: + type: git + url: https://github.com/jhu-dvrk/sawIntuitiveResearchKit.git + version: 2.2.0 + crtk/crtk_msgs: + type: git + url: https://github.com/collaborative-robotics/crtk_msgs.git + version: 1.1.0 + crtk/crtk_python_client: + type: git + url: https://github.com/collaborative-robotics/crtk_python_client.git + version: 1.2.0 + crtk/crtk_matlab_client: + type: git + url: https://github.com/collaborative-robotics/crtk_matlab_client.git + version: 1.2.0 + dvrk/dvrk-ros: + type: git + url: https://github.com/jhu-dvrk/dvrk-ros.git + version: 2.2.0 + dvrk/dvrk-gravity-compensation: + type: git + url: 
https://github.com/jhu-dvrk/dvrk-gravity-compensation.git + version: 2.0.0 + dvrk/dvrk_config_jhu: + type: git + url: https://github.com/jhu-dvrk/dvrk_config_jhu.git + version: 2.2.0 + dvrk/dESSJ-firmware: + type: git + url: https://github.com/jhu-dvrk/dESSJ-firmware.git + version: 1.0.0 diff --git a/dvrk-devel.vcs b/dvrk-devel.vcs new file mode 100644 index 00000000..41e19044 --- /dev/null +++ b/dvrk-devel.vcs @@ -0,0 +1,61 @@ +repositories: + cisst-saw/cisstNetlib: + type: git + url: https://github.com/jhu-cisst/cisstNetlib.git + version: devel + cisst-saw/cisst: + type: git + url: https://github.com/jhu-cisst/cisst.git + version: devel + cisst-saw/cisst-ros: + type: git + url: https://github.com/jhu-cisst/cisst-ros.git + version: devel + cisst-saw/sawKeyboard: + type: git + url: https://github.com/jhu-saw/sawKeyboard.git + version: devel + cisst-saw/sawTextToSpeech: + type: git + url: https://github.com/jhu-saw/sawTextToSpeech.git + version: devel + cisst-saw/sawRobotIO1394: + type: git + url: https://github.com/jhu-saw/sawRobotIO1394.git + version: devel + cisst-saw/sawControllers: + type: git + url: https://github.com/jhu-saw/sawControllers.git + version: devel + cisst-saw/sawIntuitiveResearchKit: + type: git + url: https://github.com/jhu-dvrk/sawIntuitiveResearchKit.git + version: devel + crtk/crtk_msgs: + type: git + url: https://github.com/collaborative-robotics/crtk_msgs.git + version: devel + crtk/crtk_python_client: + type: git + url: https://github.com/collaborative-robotics/crtk_python_client.git + version: devel + crtk/crtk_matlab_client: + type: git + url: https://github.com/collaborative-robotics/crtk_matlab_client.git + version: devel + dvrk/dvrk-ros: + type: git + url: https://github.com/jhu-dvrk/dvrk-ros.git + version: devel + dvrk/dvrk-gravity-compensation: + type: git + url: https://github.com/jhu-dvrk/dvrk-gravity-compensation.git + version: devel + dvrk/dvrk_config_jhu: + type: git + url: https://github.com/jhu-dvrk/dvrk_config_jhu.git + version: devel + dvrk/dESSJ-firmware: + type: git + url: https://github.com/jhu-dvrk/dESSJ-firmware.git + version: devel diff --git a/dvrk_arms_from_ros/components/CMakeLists.txt b/dvrk_arms_from_ros/components/CMakeLists.txt index 7c3fc738..52d4dc6c 100644 --- a/dvrk_arms_from_ros/components/CMakeLists.txt +++ b/dvrk_arms_from_ros/components/CMakeLists.txt @@ -1,5 +1,5 @@ # -# (C) Copyright 2020-2021 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2020-2023 Johns Hopkins University (JHU), All Rights Reserved. # # --- begin cisst license - do not edit --- # @@ -15,7 +15,7 @@ set (CMAKE_CXX_STANDARD 14) set (CMAKE_CXX_STANDARD_REQUIRED ON) set (CMAKE_CXX_EXTENSIONS OFF) -project (dvrk_arms_from_ros) +project (dvrk_arms_from_ros VERSION 2.2.0) ## find catkin macros and libraries ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) @@ -38,7 +38,7 @@ set (REQUIRED_CISST_LIBRARIES cisstParameterTypes ) -find_package (cisst 1.1.0 REQUIRED ${REQUIRED_CISST_LIBRARIES}) +find_package (cisst 1.2.0 REQUIRED ${REQUIRED_CISST_LIBRARIES}) if (cisst_FOUND_AS_REQUIRED) diff --git a/dvrk_arms_from_ros/components/include/dvrk_arm_from_ros.h b/dvrk_arms_from_ros/components/include/dvrk_arm_from_ros.h index e0f75dba..3e54f781 100644 --- a/dvrk_arms_from_ros/components/include/dvrk_arm_from_ros.h +++ b/dvrk_arms_from_ros/components/include/dvrk_arm_from_ros.h @@ -5,7 +5,7 @@ Author(s): Anton Deguet Created on: 2020-01-13 - (C) Copyright 2020 Johns Hopkins University (JHU), All Rights Reserved. 
+ (C) Copyright 2020-2021 Johns Hopkins University (JHU), All Rights Reserved. --- begin cisst license - do not edit --- @@ -19,16 +19,18 @@ no warranty. The complete license can be found in license.txt and #ifndef _dvrk_arm_from_ros_h #define _dvrk_arm_from_ros_h -#include +#include -class dvrk_arm_from_ros: public mtsROSBridge +class dvrk_arm_from_ros: public mts_ros_crtk_bridge_required { CMN_DECLARE_SERVICES(CMN_DYNAMIC_CREATION_ONEARG, CMN_LOG_ALLOW_DEFAULT); public: typedef mtsROSBridge BaseType; - dvrk_arm_from_ros(const std::string & componentName, const double periodInSeconds); + dvrk_arm_from_ros(const std::string & componentName, + ros::NodeHandle * _node_handle, + const double periodInSeconds); dvrk_arm_from_ros(const mtsTaskPeriodicConstructorArg & arg); ~dvrk_arm_from_ros(){} diff --git a/dvrk_arms_from_ros/components/include/dvrk_psm_from_ros.h b/dvrk_arms_from_ros/components/include/dvrk_psm_from_ros.h index 3d415f19..feaf5315 100644 --- a/dvrk_arms_from_ros/components/include/dvrk_psm_from_ros.h +++ b/dvrk_arms_from_ros/components/include/dvrk_psm_from_ros.h @@ -5,7 +5,7 @@ Author(s): Anton Deguet Created on: 2020-01-13 - (C) Copyright 2020 Johns Hopkins University (JHU), All Rights Reserved. + (C) Copyright 2020-2021 Johns Hopkins University (JHU), All Rights Reserved. --- begin cisst license - do not edit --- @@ -28,7 +28,9 @@ class dvrk_psm_from_ros: public dvrk_arm_from_ros public: typedef mtsROSBridge BaseType; - dvrk_psm_from_ros(const std::string & componentName, const double periodInSeconds); + dvrk_psm_from_ros(const std::string & componentName, + ros::NodeHandle * _node_handle, + const double periodInSeconds); dvrk_psm_from_ros(const mtsTaskPeriodicConstructorArg & arg); ~dvrk_psm_from_ros() {} diff --git a/dvrk_arms_from_ros/components/package.xml b/dvrk_arms_from_ros/components/package.xml index abaab05d..cdd1c06c 100644 --- a/dvrk_arms_from_ros/components/package.xml +++ b/dvrk_arms_from_ros/components/package.xml @@ -1,6 +1,6 @@ dvrk_arms_from_ros - 2.1.0 + 2.2.0 sawIntuitiveResearchKit compatible arm from ROS topics diff --git a/dvrk_arms_from_ros/components/src/dvrk_arm_from_ros.cpp b/dvrk_arms_from_ros/components/src/dvrk_arm_from_ros.cpp index 31693e18..668e81f7 100644 --- a/dvrk_arms_from_ros/components/src/dvrk_arm_from_ros.cpp +++ b/dvrk_arms_from_ros/components/src/dvrk_arm_from_ros.cpp @@ -5,7 +5,7 @@ Author(s): Anton Deguet Created on: 2020-01-13 - (C) Copyright 2020 Johns Hopkins University (JHU), All Rights Reserved. + (C) Copyright 2020-2022 Johns Hopkins University (JHU), All Rights Reserved. --- begin cisst license - do not edit --- @@ -22,76 +22,40 @@ no warranty. 
The complete license can be found in license.txt and #include CMN_IMPLEMENT_SERVICES_DERIVED_ONEARG(dvrk_arm_from_ros, - mtsROSBridge, + mts_ros_crtk_bridge_required, mtsTaskPeriodicConstructorArg); dvrk_arm_from_ros::dvrk_arm_from_ros(const std::string & componentName, + ros::NodeHandle * _node_handle, const double periodInSeconds) - : mtsROSBridge(componentName, periodInSeconds) + : mts_ros_crtk_bridge_required(componentName, _node_handle, periodInSeconds) { Init(); } dvrk_arm_from_ros::dvrk_arm_from_ros(const mtsTaskPeriodicConstructorArg & arg) - : mtsROSBridge(arg.Name, arg.Period) + : mts_ros_crtk_bridge_required(arg) { Init(); } void dvrk_arm_from_ros::Init(void) { - std::string ros_namespace = this->GetName(); - std::string interface_provided = this->GetName(); - - AddPublisherFromCommandWrite - (interface_provided, - "state_command", - ros_namespace + "/state_command"); - - AddPublisherFromCommandVoid - (interface_provided, - "Freeze", - ros_namespace + "/freeze"); - - AddPublisherFromCommandWrite - (interface_provided, - "servo_cp", - ros_namespace + "/servo_cp"); - - AddSubscriberToEventWrite - (interface_provided, "operating_state", - ros_namespace + "/operating_state"); - - AddSubscriberToCommandRead - (interface_provided, "operating_state", - ros_namespace + "/operating_state"); - - AddSubscriberToCommandRead - (interface_provided, - "setpoint_js", - ros_namespace + "/setpoint_js"); - - AddSubscriberToCommandRead - (interface_provided, - "measured_cp", - ros_namespace + "/measured_cp"); - - AddSubscriberToCommandRead - (interface_provided, - "period_statistics", - ros_namespace + "/period_statistics"); - - AddSubscriberToEventWrite - (interface_provided, "error", - ros_namespace + "/error"); - - AddSubscriberToEventWrite - (interface_provided, "warning", - ros_namespace + "/warning"); - - AddSubscriberToEventWrite - (interface_provided, "status", - ros_namespace + "/status"); + const auto ros_namespace = this->GetName(); + const auto interface_provided = this->GetName(); + + typedef std::vector Commands; + populate_interface_provided(interface_provided, + ros_namespace, + // void commands + Commands({"hold"}), + // write commands + Commands({"state_command", "servo_cp"}), + // read commands + Commands({"operating_state", "period_statistics", + "setpoint_js", "measured_js", "setpoint_cp"}), + // write events + Commands({"operating_state", "error", "warning", "status"})); } // Configure is a virtual method, we can redefine it and have our own diff --git a/dvrk_arms_from_ros/components/src/dvrk_psm_from_ros.cpp b/dvrk_arms_from_ros/components/src/dvrk_psm_from_ros.cpp index 77deb2e4..23656676 100644 --- a/dvrk_arms_from_ros/components/src/dvrk_psm_from_ros.cpp +++ b/dvrk_arms_from_ros/components/src/dvrk_psm_from_ros.cpp @@ -5,7 +5,7 @@ Author(s): Anton Deguet Created on: 2020-01-13 - (C) Copyright 2020 Johns Hopkins University (JHU), All Rights Reserved. + (C) Copyright 2020-2022 Johns Hopkins University (JHU), All Rights Reserved. 
--- begin cisst license - do not edit --- @@ -26,35 +26,34 @@ CMN_IMPLEMENT_SERVICES_DERIVED_ONEARG(dvrk_psm_from_ros, mtsTaskPeriodicConstructorArg); dvrk_psm_from_ros::dvrk_psm_from_ros(const std::string & componentName, + ros::NodeHandle * _node_handle, const double periodInSeconds) - : dvrk_arm_from_ros(componentName, periodInSeconds) + : dvrk_arm_from_ros(componentName, _node_handle, periodInSeconds) { InitPSM(); } dvrk_psm_from_ros::dvrk_psm_from_ros(const mtsTaskPeriodicConstructorArg & arg) - : dvrk_arm_from_ros(arg.Name, arg.Period) + : dvrk_arm_from_ros(arg) { InitPSM(); } void dvrk_psm_from_ros::InitPSM(void) { - std::string ros_namespace = this->GetName(); - std::string interface_provided = this->GetName(); - - AddPublisherFromCommandWrite - (interface_provided, - "jaw/servo_jp", - ros_namespace + "/jaw/servo_jp"); - - AddSubscriberToCommandRead - (interface_provided, - "jaw/measured_js", - ros_namespace + "/jaw/measured_js"); - - AddSubscriberToCommandRead - (interface_provided, - "jaw/setpoint_js", - ros_namespace + "/jaw/setpoint_js"); + + const auto ros_namespace = this->GetName(); + const auto interface_provided = this->GetName(); + + typedef std::vector Commands; + populate_interface_provided(interface_provided, + ros_namespace, + // void commands + Commands(), + // write commands + Commands({"jaw/servo_jp"}), + // read commands + Commands({"jaw/setpoint_js", "jaw/measured_js"}), + // write events + Commands()); } diff --git a/dvrk_hrsv_widget/CMakeLists.txt b/dvrk_hrsv_widget/CMakeLists.txt index c81583ea..83735448 100644 --- a/dvrk_hrsv_widget/CMakeLists.txt +++ b/dvrk_hrsv_widget/CMakeLists.txt @@ -1,5 +1,5 @@ -cmake_minimum_required (VERSION 2.8.3) -project (dvrk_hrsv_widget) +cmake_minimum_required (VERSION 3.1) +project (dvrk_hrsv_widget VERSION 2.2.0) set (CMAKE_CXX_STANDARD 14) set (CMAKE_CXX_STANDARD_REQUIRED ON) diff --git a/dvrk_hrsv_widget/package.xml b/dvrk_hrsv_widget/package.xml index 203a5950..4431946b 100644 --- a/dvrk_hrsv_widget/package.xml +++ b/dvrk_hrsv_widget/package.xml @@ -1,7 +1,7 @@ dvrk_hrsv_widget - 2.1.0 + 2.2.0 dVRK HRSV widget Anton Deguet diff --git a/dvrk_model/CMakeLists.txt b/dvrk_model/CMakeLists.txt index f78b857f..c84fce4e 100644 --- a/dvrk_model/CMakeLists.txt +++ b/dvrk_model/CMakeLists.txt @@ -1,5 +1,5 @@ -cmake_minimum_required(VERSION 2.8.3) -project(dvrk_model) +cmake_minimum_required(VERSION 3.1) +project(dvrk_model VERSION 2.1.0) ## Find catkin macros and libraries ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) diff --git a/dvrk_model/package.xml b/dvrk_model/package.xml index 15538779..993fc465 100644 --- a/dvrk_model/package.xml +++ b/dvrk_model/package.xml @@ -1,7 +1,7 @@ dvrk_model - 2.1.0 + 2.2.0 The dvrk_model package Anton Deguet diff --git a/dvrk_python/CMakeLists.txt b/dvrk_python/CMakeLists.txt index 223431c9..2b84f099 100644 --- a/dvrk_python/CMakeLists.txt +++ b/dvrk_python/CMakeLists.txt @@ -1,5 +1,5 @@ -cmake_minimum_required (VERSION 2.8.3) -project (dvrk_python) +cmake_minimum_required (VERSION 3.1) +project (dvrk_python VERSION 2.2.0) ## Find catkin macros and libraries ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) @@ -28,4 +28,4 @@ catkin_package ( # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} # ) -catkin_python_setup () \ No newline at end of file +catkin_python_setup () diff --git a/dvrk_python/doc/Makefile b/dvrk_python/doc/Makefile deleted file mode 100644 index a233e023..00000000 --- a/dvrk_python/doc/Makefile +++ /dev/null @@ -1,153 +0,0 @@ -# 
Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = _build - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/src.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/src.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/src" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/src" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
- -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." diff --git a/dvrk_python/doc/conf.py b/dvrk_python/doc/conf.py deleted file mode 100644 index 4e38123d..00000000 --- a/dvrk_python/doc/conf.py +++ /dev/null @@ -1,287 +0,0 @@ -# -*- coding: utf-8 -*- -# -# src documentation build configuration file, created by -# sphinx-quickstart on Thu Jul 16 11:39:48 2015. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys, os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../src')) - -# -- General configuration ----------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. 
-#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'src' -copyright = u'2015, Yijun Hu' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '' -# The full version, including alpha/beta/rc tags. -release = '' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. 
-#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'srcdoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'src.tex', u'src Documentation', - u'Yijun Hu', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'src', u'src Documentation', - [u'Yijun Hu'], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------------ - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'src', u'src Documentation', - u'Yijun Hu', 'src', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - - -# -- Options for Epub output --------------------------------------------------- - -# Bibliographic Dublin Core info. -epub_title = u'src' -epub_author = u'Yijun Hu' -epub_publisher = u'Yijun Hu' -epub_copyright = u'2015, Yijun Hu' - -# The language of the text. It defaults to the language option -# or en if the language is not set. -#epub_language = '' - -# The scheme of the identifier. Typical schemes are ISBN or URL. -#epub_scheme = '' - -# The unique identifier of the text. 
This can be a ISBN number -# or the project homepage. -#epub_identifier = '' - -# A unique identification for the text. -#epub_uid = '' - -# A tuple containing the cover image and cover page html template filenames. -#epub_cover = () - -# HTML files that should be inserted before the pages created by sphinx. -# The format is a list of tuples containing the path and title. -#epub_pre_files = [] - -# HTML files shat should be inserted after the pages created by sphinx. -# The format is a list of tuples containing the path and title. -#epub_post_files = [] - -# A list of files that should not be packed into the epub file. -#epub_exclude_files = [] - -# The depth of the table of contents in toc.ncx. -#epub_tocdepth = 3 - -# Allow duplicate toc entries. -#epub_tocdup = True - -autodoc_member_order = 'bysource' diff --git a/dvrk_python/doc/getters.rst b/dvrk_python/doc/getters.rst deleted file mode 100644 index 4f1ae48b..00000000 --- a/dvrk_python/doc/getters.rst +++ /dev/null @@ -1,7 +0,0 @@ -getters Module -============== - -.. automodule:: getters - :members: - :undoc-members: - :show-inheritance: diff --git a/dvrk_python/doc/index.rst b/dvrk_python/doc/index.rst deleted file mode 100644 index 374a89fb..00000000 --- a/dvrk_python/doc/index.rst +++ /dev/null @@ -1,47 +0,0 @@ -.. src documentation master file, created by - sphinx-quickstart on Thu Jul 16 11:39:48 2015. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to Robot API's documentation! -===================================== - -This is the api you can use to run the daVinci Research Kit: - -.. toctree:: - :maxdepth: 4 - - robot - -Examples -======== -Here are the examples of how to run the code: - -.. toctree:: - :maxdepth: 4 - - power_on - power_off - getters - simple_cartesian_move - simple_joint_move - plotting_cartesian - plotting_joint - -More Examples -============= -Here are more complex examples: - -.. toctree:: - :maxdepth: 4 - - regular_polygon - picking_and_moving - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/dvrk_python/doc/make.bat b/dvrk_python/doc/make.bat deleted file mode 100644 index cc46f3aa..00000000 --- a/dvrk_python/doc/make.bat +++ /dev/null @@ -1,190 +0,0 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set BUILDDIR=_build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . -set I18NSPHINXOPTS=%SPHINXOPTS% . -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% - set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. singlehtml to make a single large HTML file - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. devhelp to make HTML files and a Devhelp project - echo. epub to make an epub - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. text to make text files - echo. man to make manual pages - echo. texinfo to make Texinfo files - echo. gettext to make PO message catalogs - echo. changes to make an overview over all changed/added/deprecated items - echo. 
linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) - -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. - goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. - goto end -) - -if "%1" == "singlehtml" ( - %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the JSON files. - goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. - goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\src.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\src.ghc - goto end -) - -if "%1" == "devhelp" ( - %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. - goto end -) - -if "%1" == "epub" ( - %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The epub file is in %BUILDDIR%/epub. - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "text" ( - %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The text files are in %BUILDDIR%/text. - goto end -) - -if "%1" == "man" ( - %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The manual pages are in %BUILDDIR%/man. - goto end -) - -if "%1" == "texinfo" ( - %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. - goto end -) - -if "%1" == "gettext" ( - %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The message catalogs are in %BUILDDIR%/locale. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - if errorlevel 1 exit /b 1 - echo. - echo.The overview file is in %BUILDDIR%/changes. 
- goto end -) - -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - if errorlevel 1 exit /b 1 - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) - -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - if errorlevel 1 exit /b 1 - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. - goto end -) - -:end diff --git a/dvrk_python/doc/picking_and_moving.rst b/dvrk_python/doc/picking_and_moving.rst deleted file mode 100644 index 9c18a176..00000000 --- a/dvrk_python/doc/picking_and_moving.rst +++ /dev/null @@ -1,7 +0,0 @@ -picking_and_moving Module -========================= - -.. automodule:: picking_and_moving - :members: - :undoc-members: - :show-inheritance: diff --git a/dvrk_python/doc/plotting_cartesian.rst b/dvrk_python/doc/plotting_cartesian.rst deleted file mode 100644 index a0e2b6a7..00000000 --- a/dvrk_python/doc/plotting_cartesian.rst +++ /dev/null @@ -1,7 +0,0 @@ -plotting_cartesian Module -========================= - -.. automodule:: plotting_cartesian - :members: - :undoc-members: - :show-inheritance: diff --git a/dvrk_python/doc/plotting_joint.rst b/dvrk_python/doc/plotting_joint.rst deleted file mode 100644 index 3db9089a..00000000 --- a/dvrk_python/doc/plotting_joint.rst +++ /dev/null @@ -1,7 +0,0 @@ -plotting_joint Module -===================== - -.. automodule:: plotting_joint - :members: - :undoc-members: - :show-inheritance: diff --git a/dvrk_python/doc/power_off.rst b/dvrk_python/doc/power_off.rst deleted file mode 100644 index 59a6464e..00000000 --- a/dvrk_python/doc/power_off.rst +++ /dev/null @@ -1,7 +0,0 @@ -power_off Module -================ - -.. automodule:: power_off - :members: - :undoc-members: - :show-inheritance: diff --git a/dvrk_python/doc/power_on.rst b/dvrk_python/doc/power_on.rst deleted file mode 100644 index 6c0b68ad..00000000 --- a/dvrk_python/doc/power_on.rst +++ /dev/null @@ -1,7 +0,0 @@ -power_on Module -=============== - -.. automodule:: power_on - :members: - :undoc-members: - :show-inheritance: diff --git a/dvrk_python/doc/regular_polygon.rst b/dvrk_python/doc/regular_polygon.rst deleted file mode 100644 index 7cd884cb..00000000 --- a/dvrk_python/doc/regular_polygon.rst +++ /dev/null @@ -1,7 +0,0 @@ -regular_polygon Module -====================== - -.. automodule:: regular_polygon - :members: - :undoc-members: - :show-inheritance: diff --git a/dvrk_python/doc/robot.rst b/dvrk_python/doc/robot.rst deleted file mode 100644 index f42a283c..00000000 --- a/dvrk_python/doc/robot.rst +++ /dev/null @@ -1,7 +0,0 @@ -robot Module -============ - -.. automodule:: robot - :members: - :undoc-members: - :show-inheritance: diff --git a/dvrk_python/doc/simple_cartesian_move.rst b/dvrk_python/doc/simple_cartesian_move.rst deleted file mode 100644 index 655da7d3..00000000 --- a/dvrk_python/doc/simple_cartesian_move.rst +++ /dev/null @@ -1,7 +0,0 @@ -simple_cartesian_move Module -============================ - -.. automodule:: simple_cartesian_move - :members: - :undoc-members: - :show-inheritance: diff --git a/dvrk_python/doc/simple_joint_move.rst b/dvrk_python/doc/simple_joint_move.rst deleted file mode 100644 index 22815b42..00000000 --- a/dvrk_python/doc/simple_joint_move.rst +++ /dev/null @@ -1,7 +0,0 @@ -simple_joint_move Module -======================== - -.. 
automodule:: simple_joint_move - :members: - :undoc-members: - :show-inheritance: diff --git a/dvrk_python/doc/writing.rst b/dvrk_python/doc/writing.rst deleted file mode 100644 index e247a8bf..00000000 --- a/dvrk_python/doc/writing.rst +++ /dev/null @@ -1,7 +0,0 @@ -writing Module -============== - -.. automodule:: writing - :members: - :undoc-members: - :show-inheritance: diff --git a/dvrk_python/package.xml b/dvrk_python/package.xml index f66e8802..3250c1c8 100644 --- a/dvrk_python/package.xml +++ b/dvrk_python/package.xml @@ -1,7 +1,7 @@ dvrk_python - 2.1.0 + 2.2.0 The dVRK Python package Anton Deguet diff --git a/dvrk_python/scripts/dvrk_arm_test.py b/dvrk_python/scripts/dvrk_arm_test.py index 03911457..ac489a05 100755 --- a/dvrk_python/scripts/dvrk_arm_test.py +++ b/dvrk_python/scripts/dvrk_arm_test.py @@ -3,7 +3,7 @@ # Author: Anton Deguet # Date: 2015-02-22 -# (C) Copyright 2015-2021 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2015-2023 Johns Hopkins University (JHU), All Rights Reserved. # --- begin cisst license - do not edit --- @@ -17,41 +17,41 @@ # > rosrun dvrk_robot dvrk_console_json -j # To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py: -# > rosrun dvrk_python dvrk_arm_test.py +# > rosrun dvrk_python dvrk_arm_test.py -a -import dvrk -import math +import argparse import sys import time -import rospy +import crtk +import dvrk +import math import numpy import PyKDL -import argparse - -# print with node id -def print_id(message): - print('%s -> %s' % (rospy.get_caller_id(), message)) # example of application using arm.py class example_application: # configuration - def configure(self, robot_name, expected_interval): - print_id('configuring dvrk_arm_test for %s' % robot_name) + def __init__(self, ral, arm_name, expected_interval): + print('configuring dvrk_arm_test for {}'.format(arm_name)) + self.ral = ral self.expected_interval = expected_interval - self.arm = dvrk.arm(arm_name = robot_name, + self.arm = dvrk.arm(ral = ral, + arm_name = arm_name, expected_interval = expected_interval) # homing example def home(self): - print_id('starting enable') + self.arm.check_connections() + + print('starting enable') if not self.arm.enable(10): sys.exit('failed to enable within 10 seconds') - print_id('starting home') + print('starting home') if not self.arm.home(10): sys.exit('failed to home within 10 seconds') # get current joints just to set size - print_id('move to starting position') + print('move to starting position') goal = numpy.copy(self.arm.setpoint_jp()) # go to zero position, for PSM and ECM make sure 3rd joint is past cannula goal.fill(0) @@ -59,14 +59,14 @@ def home(self): or (self.arm.name() == 'PSM3') or (self.arm.name() == 'ECM')): goal[2] = 0.12 # move and wait - print_id('moving to starting position') + print('moving to starting position') self.arm.move_jp(goal).wait() # try to move again to make sure waiting is working fine, i.e. 
not blocking - print_id('testing move to current position') + print('testing move to current position') move_handle = self.arm.move_jp(goal) - time.sleep(1.0) # add some artificial latency on this side + print('move handle should return immediately') move_handle.wait() - print_id('home complete') + print('home complete') # get methods def run_get(self): @@ -102,30 +102,37 @@ def run_get(self): # direct joint control example def run_servo_jp(self): - print_id('starting servo_jp') + print('starting servo_jp') # get current position initial_joint_position = numpy.copy(self.arm.setpoint_jp()) - print_id('testing direct joint position for 2 joints out of %i' % initial_joint_position.size) + print('testing direct joint position for 2 joints out of %i' % initial_joint_position.size) amplitude = math.radians(5.0) # +/- 5 degrees duration = 5 # seconds samples = duration / self.expected_interval # create a new goal starting with current position - goal = numpy.copy(initial_joint_position) - start = rospy.Time.now() + goal_p = numpy.copy(initial_joint_position) + goal_v = numpy.zeros(goal_p.size) + start = time.time() + + sleep_rate = self.ral.create_rate(1.0 / self.expected_interval) for i in range(int(samples)): - goal[0] = initial_joint_position[0] + amplitude * (1.0 - math.cos(i * math.radians(360.0) / samples)) - goal[1] = initial_joint_position[1] + amplitude * (1.0 - math.cos(i * math.radians(360.0) / samples)) - self.arm.servo_jp(goal) - rospy.sleep(self.expected_interval) - actual_duration = rospy.Time.now() - start - print_id('servo_jp complete in %2.2f seconds (expected %2.2f)' % (actual_duration.to_sec(), duration)) + angle = i * math.radians(360.0) / samples + goal_p[0] = initial_joint_position[0] + amplitude * (1.0 - math.cos(angle)) + goal_p[1] = initial_joint_position[1] + amplitude * (1.0 - math.cos(angle)) + goal_v[0] = amplitude * math.sin(angle) + goal_v[1] = goal_v[0] + self.arm.servo_jp(goal_p, goal_v) + sleep_rate.sleep() + + actual_duration = time.time() - start + print('servo_jp complete in %2.2f seconds (expected %2.2f)' % (actual_duration, duration)) # goal joint control example def run_move_jp(self): - print_id('starting move_jp') + print('starting move_jp') # get current position initial_joint_position = numpy.copy(self.arm.setpoint_jp()) - print_id('testing goal joint position for 2 joints out of %i' % initial_joint_position.size) + print('testing goal joint position for 2 joints out of %i' % initial_joint_position.size) amplitude = math.radians(10.0) # create a new goal starting with current position goal = numpy.copy(initial_joint_position) @@ -139,14 +146,15 @@ def run_move_jp(self): self.arm.move_jp(goal).wait() # back to initial position self.arm.move_jp(initial_joint_position).wait() - print_id('move_jp complete') + print('move_jp complete') # utility to position tool/camera deep enough before cartesian examples def prepare_cartesian(self): # make sure the camera is past the cannula and tool vertical goal = numpy.copy(self.arm.setpoint_jp()) - if ((self.arm.name() == 'PSM1') or (self.arm.name() == 'PSM2') - or (self.arm.name() == 'PSM3') or (self.arm.name() == 'ECM')): + if ((self.arm.name().endswith('PSM1')) or (self.arm.name().endswith('PSM2')) + or (self.arm.name().endswith('PSM3')) or (self.arm.name().endswith('ECM'))): + print('preparing for cartesian motion') # set in position joint mode goal[0] = 0.0 goal[1] = 0.0 @@ -156,7 +164,7 @@ def prepare_cartesian(self): # direct cartesian control example def run_servo_cp(self): - print_id('starting servo_cp') + 
print('starting servo_cp') self.prepare_cartesian() # create a new goal starting with current position @@ -170,7 +178,9 @@ def run_servo_cp(self): amplitude = 0.02 # 4 cm total duration = 5 # 5 seconds samples = duration / self.expected_interval - start = rospy.Time.now() + start = time.time() + + sleep_rate = self.ral.create_rate(1.0 / self.expected_interval) for i in range(int(samples)): goal.p[0] = initial_cartesian_position.p[0] + amplitude * (1.0 - math.cos(i * math.radians(360.0) / samples)) goal.p[1] = initial_cartesian_position.p[1] + amplitude * (1.0 - math.cos(i * math.radians(360.0) / samples)) @@ -184,14 +194,15 @@ def run_servo_cp(self): errorZ = goal.p[2] - setpoint_cp.p[2] error = math.sqrt(errorX * errorX + errorY * errorY + errorZ * errorZ) if error > 0.002: # 2 mm - print_id('Inverse kinematic error in position [%i]: %s (might be due to latency)' % (i, error)) - rospy.sleep(self.expected_interval) - actual_duration = rospy.Time.now() - start - print_id('servo_cp complete in %2.2f seconds (expected %2.2f)' % (actual_duration.to_sec(), duration)) + print('Inverse kinematic error in position [%i]: %s (might be due to latency)' % (i, error)) + sleep_rate.sleep() + + actual_duration = time.time() - start + print('servo_cp complete in %2.2f seconds (expected %2.2f)' % (actual_duration, duration)) # direct cartesian control example def run_move_cp(self): - print_id('starting move_cp') + print('starting move_cp') self.prepare_cartesian() # create a new goal starting with current position @@ -229,7 +240,7 @@ def run_move_cp(self): goal.p[0] = initial_cartesian_position.p[0] goal.p[1] = initial_cartesian_position.p[1] self.arm.move_cp(goal).wait() - print_id('move_cp complete') + print('move_cp complete') # main method def run(self): @@ -241,10 +252,8 @@ def run(self): self.run_move_cp() if __name__ == '__main__': - # ros init node so we can use default ros arguments (e.g. __ns:= for namespace) - rospy.init_node('dvrk_arm_test', anonymous=True) - # strip ros arguments - argv = rospy.myargv(argv=sys.argv) + # extract ros arguments (e.g. __ns:= for namespace) + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name # parse arguments parser = argparse.ArgumentParser() @@ -253,8 +262,8 @@ def run(self): help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') parser.add_argument('-i', '--interval', type=float, default=0.01, help = 'expected interval in seconds between messages sent by the device') - args = parser.parse_args(argv[1:]) # skip argv[0], script name + args = parser.parse_args(argv) - application = example_application() - application.configure(args.arm, args.interval) - application.run() + ral = crtk.ral('dvrk_arm_test') + application = example_application(ral, args.arm, args.interval) + ral.spin_and_execute(application.run) diff --git a/dvrk_python/scripts/dvrk_bag_replay.py b/dvrk_python/scripts/dvrk_bag_replay.py index 9cf38ea7..81192f7b 100755 --- a/dvrk_python/scripts/dvrk_bag_replay.py +++ b/dvrk_python/scripts/dvrk_bag_replay.py @@ -3,7 +3,7 @@ # Author: Anton Deguet # Date: 2021-06-24 -# (C) Copyright 2021 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2021-2023 Johns Hopkins University (JHU), All Rights Reserved. 
# --- begin cisst license - do not edit --- @@ -13,28 +13,60 @@ # --- end cisst license --- -# Start a single arm using +# First collect a bag of data using rosbag while the robot is moving: +# > rosbag record /PSM1/setpoint_cp /PSM1/setpoint_cv /PSM1/setpoint_js /PSM1/jaw/setpoint_js +# Hit ctrl+c to stop rosbag recording + +# Then start a single arm using: # > rosrun dvrk_robot dvrk_console_json -j -# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py: -# > rosrun dvrk_python dvrk_bag_replay.py -a PSM1 -b /home/anton/2021-06-24-10-55-04.bag -t /PSM1/local/measured_cp +# After that, you can replay the trajectory using: +# > rosrun dvrk_python dvrk_bag_replay.py -a PSM1 -b /home/anton/2021-06-24-10-55-04.bag -m servo_cp + +# If you have a PSM and want to also replay the jaw motion, use -j +# > rosrun dvrk_python dvrk_bag_replay.py -a PSM1 -b /home/anton/2021-06-24-10-55-04.bag -m servo_cp -j -import dvrk +import crtk import sys import time -import rospy import rosbag import numpy import PyKDL import argparse +# simplified arm class to replay motion, better performance than +# dvrk.arm since we're only subscribing to topics we need +class replay_device: + + # simplified jaw class to control the jaws, will not be used without the -j option + class __jaw_device: + def __init__(self, ral, + expected_interval, operating_state_instance): + self.__crtk_utils = crtk.utils(self, ral, + expected_interval, operating_state_instance) + self.__crtk_utils.add_move_jp() + self.__crtk_utils.add_servo_jp() + + def __init__(self, ral, expected_interval): + # populate this class with all the ROS topics we need + self.__ral = ral + self.crtk_utils = crtk.utils(self, ral, expected_interval) + self.crtk_utils.add_operating_state() + self.crtk_utils.add_servo_jp() + self.crtk_utils.add_move_jp() + self.crtk_utils.add_servo_cp() + self.crtk_utils.add_move_cp() + self.jaw = self.__jaw_device(ral.create_child('/jaw'), + expected_interval, + operating_state_instance = self) + def ral(self): + return self.__ral + if sys.version_info.major < 3: input = raw_input -# ros init node so we can use default ros arguments (e.g. __ns:= for namespace) -rospy.init_node('dvrk_bag_replay', anonymous=True) -# strip ros arguments -argv = rospy.myargv(argv=sys.argv) +# extract ros arguments (e.g. __ns:= for namespace) +argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name # parse arguments parser = argparse.ArgumentParser() @@ -45,69 +77,120 @@ help = 'expected interval in seconds between messages sent by the device') parser.add_argument('-b', '--bag', type = argparse.FileType('r'), required = True, help = 'ros bag containing the trajectory to replay. The script assumes the topic to use is //setpoint_cp. You can change the topic used with the -t option') +parser.add_argument('-m', '--mode', type = str, required = True, + choices = ['servo_jp', 'servo_cp'], + help = 'topic used to send command to arm, either joint or cartesian positions') parser.add_argument('-t', '--topic', type = str, help = 'topic used in the ros bag. If not set, the script will use //setpoint_cp. Other examples: /PSM1/local/setpoint_cp. This is useful if you recorded the trajectory on a PSM with a base-frame defined in the console.json and you are replaying the trajectory on a PSM without a base-frame (e.g. default console provided with PSM in kinematic simulation mode. 
This option allows to use the recorded setpoints without base-frame') +parser.add_argument('-j', '--jaw', action = 'store_true', + help = 'specify if the PSM jaw should also be replayed. The topic //jaw/setpoint_js will be used to determine the jaw trajectory') -args = parser.parse_args(argv[1:]) # skip argv[0], script name +args = parser.parse_args(argv) +ral = crtk.ral('dvrk_bag_replay') + +is_cp = (args.mode == 'servo_cp') +has_jaw = args.jaw + +# default topic to get data from the ros bag, depends on the mode if args.topic is None: - topic = '/' + args.arm + '/setpoint_cp' + if is_cp: + topic = '/' + args.arm + '/setpoint_cp' + else: + topic = '/' + args.arm + '/setpoint_js' else: topic = args.topic +if has_jaw: + jaw_topic = '/' + args.arm + '/jaw/setpoint_js' + # info print('-- This script will use the topic %s\n to replay a trajectory on arm %s' % (topic, args.arm)) # parse bag and create list of points -bbmin = numpy.zeros(3) -bbmax = numpy.zeros(3) -last_message_time = 0.0 -out_of_order_counter = 0 -poses = [] +setpoint_time_previous = 0.0 +setpoints_out_of_order = 0 +setpoints = [] + +if has_jaw: + jaw_setpoint_time_previous = 0.0 + jaw_setpoints_out_of_order = 0 + jaw_setpoints = [] + +# if mode is cartesian, compute a bounding box so user can make sure +# scale and reference frame make sense +if is_cp: + bbmin = numpy.zeros(3) + bbmax = numpy.zeros(3) print('-- Parsing bag %s' % (args.bag.name)) for bag_topic, bag_message, t in rosbag.Bag(args.bag.name).read_messages(): if bag_topic == topic: # check order of timestamps, drop if out of order - transform_time = bag_message.header.stamp.to_sec() - if transform_time <= last_message_time: - out_of_order_counter = out_of_order_counter + 1 + setpoint_time = ral.to_sec(bag_message.header.stamp) + if setpoint_time <= setpoint_time_previous: + setpoints_out_of_order += 1 else: # append message - poses.append(bag_message) - # keep track of workspace - position = numpy.array([bag_message.transform.translation.x, - bag_message.transform.translation.y, - bag_message.transform.translation.z]) - if len(poses) == 1: - bbmin = position - bbmax = position + setpoints.append(bag_message) + setpoint_time_previous = setpoint_time + + # keep track of workspace in cartesian space + if is_cp: + position = numpy.array([bag_message.pose.position.x, + bag_message.pose.position.y, + bag_message.pose.position.z]) + if len(setpoints) == 1: + bbmin = position + bbmax = position + else: + bbmin = numpy.minimum(bbmin, position) + bbmax = numpy.maximum(bbmax, position) + + if has_jaw: + if bag_topic == jaw_topic: + # check order of timestamps, drop if out of order + jaw_setpoint_time = ral.to_sec(bag_message.header.stamp) + if jaw_setpoint_time <= jaw_setpoint_time_previous: + jaw_setpoints_out_of_order += 1 else: - bbmin = numpy.minimum(bbmin, position) - bbmax = numpy.maximum(bbmax, position) + # append message + jaw_setpoints.append(bag_message) + jaw_setpoint_time_previous = jaw_setpoint_time -print('-- Found %i setpoints using topic %s' % (len(poses), topic)) -if len(poses) == 0: +print('-- Found %i setpoints using topic %s' % (len(setpoints), topic)) +if len(setpoints) == 0: print ('-- No trajectory found!') sys.exit() # report out of order setpoints -if out_of_order_counter > 0: - print('-- Found and removed %i out of order setpoints' % (out_of_order_counter)) +if setpoints_out_of_order > 0: + print('-- Found and removed %i out of order setpoints' % (setpoints_out_of_order)) + +# same thing for jaws +if has_jaw: + print('-- Found %i jaw setpoints using 
topic %s' % (len(jaw_setpoints), jaw_topic)) + if len(jaw_setpoints) == 0: + print ('-- No jaw trajectory found!') + sys.exit() + if jaw_setpoints_out_of_order > 0: + print('-- Found and removed %i out of order jaw setpoints' % (jaw_setpoints_out_of_order)) # convert to mm -bbmin = bbmin * 1000.0 -bbmax = bbmax * 1000.0 -print ('-- Range of motion in mm:\n X:[%f, %f]\n Y:[%f, %f]\n Z:[%f, %f]' - % (bbmin[0], bbmax[0], bbmin[1], bbmax[1], bbmin[2], bbmax[2])) +if is_cp: + bbmin = bbmin * 1000.0 + bbmax = bbmax * 1000.0 + print ('-- Range of motion in mm:\n X:[%f, %f]\n Y:[%f, %f]\n Z:[%f, %f]\n Make sure these values make sense. If the ros bag was based on a different console configuration, the base frame might have changed and the trajectory might not be safe nor feasible.' + % (bbmin[0], bbmax[0], bbmin[1], bbmax[1], bbmin[2], bbmax[2])) # compute duration -duration = poses[-1].header.stamp.to_sec() - poses[0].header.stamp.to_sec() +duration = ral.to_sec(setpoints[-1].header.stamp) - ral.to_sec(setpoints[0].header.stamp) print ('-- Duration of trajectory: %f seconds' % (duration)) # send trajectory to arm -arm = dvrk.arm(arm_name = args.arm, - expected_interval = args.interval) +arm = replay_device(ral.create_child(args.arm), + expected_interval = args.interval) +arm.ral().check_connections() # make sure the arm is powered print('-- Enabling arm') @@ -118,48 +201,61 @@ if not arm.home(10): sys.exit('-- Failed to home within 10 seconds') -input('---> Make sure the arm is ready to move using cartesian positions. For a PSM or ECM, you need to have a tool in place and the tool tip needs to be outside the cannula. You might have to manually adjust your arm. Press "\Enter" when the arm is ready.') +if is_cp: + input('-> Make sure the arm is ready to move using cartesian positions. For a PSM or ECM, you need to have a tool in place and the tool tip needs to be outside the cannula. You might have to manually adjust your arm. Press "Enter" when the arm is ready.') -input('---> Press \"Enter\" to move to start position') +input('-> Press "Enter" to move to start position') -# Create frame using first pose and use blocking move -cp = PyKDL.Frame() -cp.p = PyKDL.Vector(poses[0].transform.translation.x, - poses[0].transform.translation.y, - poses[0].transform.translation.z) -cp.M = PyKDL.Rotation.Quaternion(poses[0].transform.rotation.x, - poses[0].transform.rotation.y, - poses[0].transform.rotation.z, - poses[0].transform.rotation.w) -arm.move_cp(cp).wait() +# move to the first position using arm trajectory generation (move_) +if is_cp: + arm.move_cp(crtk.msg_conversions.FrameFromPoseMsg(setpoints[0].pose)).wait() +else: + arm.move_jp(numpy.array(setpoints[0].position)).wait() + +if has_jaw: + arm.jaw.move_jp(numpy.array(jaw_setpoints[0].position)).wait() -# Replay -input('---> Press \"Enter\" to replay trajectory') +# replay +input('-> Press "Enter" to replay trajectory') -last_time = poses[0].header.stamp.to_sec() +last_bag_time = ral.to_sec(setpoints[0].header.stamp) counter = 0 -total = len(poses) +if has_jaw: + total = min(len(setpoints), len(jaw_setpoints)) +else: + total = len(setpoints) + start_time = time.time() -for pose in poses: - # crude sleep to emulate period. 
This doesn't take into account - # time spend to compute cp from pose and publish so this will - # definitely be slower than recorded trajectory - new_time = pose.header.stamp.to_sec() - time.sleep(new_time - last_time) - last_time = new_time - cp.p = PyKDL.Vector(pose.transform.translation.x, - pose.transform.translation.y, - pose.transform.translation.z) - cp.M = PyKDL.Rotation.Quaternion(pose.transform.rotation.x, - pose.transform.rotation.y, - pose.transform.rotation.z, - pose.transform.rotation.w) - arm.servo_cp(cp) +# for the replay, use the jp/cp setpoint for the arm to control the +# execution time. Jaw positions are picked in order without checking +# time. There might be better ways to synchronized the two +# sequences... +for index in range(total): + # record start time + loop_start_time = time.time() + # compute expected dt + new_bag_time = ral.to_sec(setpoints[index].header.stamp) + delta_bag_time = new_bag_time - last_bag_time + last_bag_time = new_bag_time + # replay + if is_cp: + arm.servo_cp(crtk.msg_conversions.FrameFromPoseMsg(setpoints[index].pose)) + else: + arm.servo_jp(numpy.array(setpoints[index].position), numpy.array(setpoints[index].velocity)) + if has_jaw: + arm.jaw.servo_jp(numpy.array(jaw_setpoints[index].position), numpy.array(jaw_setpoints[index].velocity)) + # update progress counter = counter + 1 sys.stdout.write('\r-- Progress %02.1f%%' % (float(counter) / float(total) * 100.0)) sys.stdout.flush() + # try to keep motion synchronized + loop_end_time = time.time() + sleep_time = delta_bag_time - (loop_end_time - loop_start_time) + # if process takes time larger than console rate, don't sleep + if sleep_time > 0: + time.sleep(sleep_time) print('\n--> Time to replay trajectory: %f seconds' % (time.time() - start_time)) print('--> Done!') diff --git a/dvrk_python/scripts/dvrk_calibrate_potentiometer_psm.py b/dvrk_python/scripts/dvrk_calibrate_potentiometer_psm.py new file mode 100755 index 00000000..a94bedd5 --- /dev/null +++ b/dvrk_python/scripts/dvrk_calibrate_potentiometer_psm.py @@ -0,0 +1,255 @@ +#!/usr/bin/env python + +# Author: Anton Deguet +# Date: 2015-02-22 + +# (C) Copyright 2015-2023 Johns Hopkins University (JHU), All Rights Reserved. + +# --- begin cisst license - do not edit --- + +# This software is provided "as is" under an open source license, with +# no warranty. The complete license can be found in license.txt and +# http://www.cisst.org/cisst/license.txt. 
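+
+# A typical invocation, once the dVRK console is running, might look like
+# (the arm name and IO file below are placeholders, use the XML file that
+# matches your controller serial number):
+# > rosrun dvrk_python dvrk_calibrate_potentiometer_psm.py -a PSM1 -c sawRobotIO1394-PSM1-12345.xml
+# The optional -s {0,1} flag selects the joint used for the swing motion
+# around the RCM; see the argument parser at the bottom of this script.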
+ +# --- end cisst license --- + +# Start a single arm using +# > rosrun dvrk_robot dvrk_console_json -j + +# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py: +# > rosrun dvrk_python dvrk_arm_test.py + +import crtk +import dvrk + +import math +import sys +import time +import select +import tty +import termios +import numpy +import argparse + +import os.path +import xml.etree.ElementTree as ET + +# for keyboard capture +def is_there_a_key_press(): + return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], []) + +# example of application using arm.py +class calibration_psm: + + # configuration + def __init__(self, ral, arm_name, config_file, expected_interval = 0.01): + self.expected_interval = expected_interval + self.config_file = config_file + # check that the config file is good + if not os.path.exists(self.config_file): + sys.exit('Config file "{:s}" not found'.format(self.config_file)) + + # XML parsing, find current offset + self.tree = ET.parse(config_file) + root = self.tree.getroot() + + # find Robot in config file and make sure name matches + xpath_search_results = root.findall('./Robot') + if len(xpath_search_results) == 1: + xmlRobot = xpath_search_results[0] + else: + sys.exit('Can\'t find "Robot" in configuration file {:s}'.format(self.config_file)) + + if xmlRobot.get('Name') == arm_name: + serial_number = xmlRobot.get('SN') + print('Successfully found robot "{:s}", serial number {:s} in XML file'.format(arm_name, serial_number)) + robotFound = True + else: + sys.exit('Found robot "{:s}" while looking for "{:s}", make sure you\'re using the correct configuration file!'.format(xmlRobot.get('Name'), arm_name)) + + hardware_version = xmlRobot.get('HardwareVersion') + self.analog_potentiometers = True + if hardware_version in ['QLA1', 'DQLA']: + print('Calibrating analog potentiometers on PSM Classic') + elif hardware_version in ['dRA1']: + self.analog_potentiometers = False + print('Calibrating digital potentiometers on PSM Si') + else: + sys.exit('"HardwareVersion is not defined/supported for robot "{:s}"'.format(arm_name)) + + # now find the offset for joint 2, we assume there's only one result + if self.analog_potentiometers: + xpath_search_results = root.findall("./Robot/Actuator[@ActuatorID='2']/AnalogIn/VoltsToPosSI") + self.xmlPot = xpath_search_results[0] + print('Potentiometer offset for joint 2 is currently: {:s}'.format(self.xmlPot.get('Offset'))) + + self.arm = dvrk.psm(ral = ral, + arm_name = arm_name, + expected_interval = expected_interval) + self.arm.check_connections() + + # homing example + def home(self): + print('Enabling...') + if not self.arm.enable(10): + sys.exit('failed to enable within 10 seconds') + print('Homing...') + if not self.arm.home(10): + sys.exit('failed to home within 10 seconds') + + # get current joints just to set size + print('Moving to zero position...') + goal = numpy.copy(self.arm.setpoint_jp()) + goal.fill(0) + self.arm.move_jp(goal).wait() + self.arm.jaw.move_jp(numpy.array([0.0])).wait() + # identify depth for tool j5 using forward kinematics + cp = self.arm.forward_kinematics(numpy.array([0.0, 0.0, 0.0, 0.0])) + self.q2 = cp.p.z() + print('Depth required to position O5 on RCM point: {0:4.2f}mm'.format(self.q2 * 1000.0)) + + # find range + def find_range(self, swing_joint): + if swing_joint == 0: + print('Finding range of motion for joint 0\nMove the arm manually (pressing the clutch) to find the maximum range of motion for the first joint (left to right motion).\n - press "d" 
when you\'re done\n - press "q" to abort\n') + else: + print('Finding range of motion for joint 1\nMove the arm manually (pressing the clutch) to find the maximum range of motion for the second joint (back to front motion).\n - press "d" when you\'re done\n - press "q" to abort\n') + + self.min = math.radians( 180.0) + self.max = math.radians(-180.0) + done = False + increment = numpy.copy(self.arm.setpoint_jp()) + increment.fill(0) + + # termios settings + old_settings = termios.tcgetattr(sys.stdin) + try: + tty.setcbreak(sys.stdin.fileno()) + while not done: + # process key + if is_there_a_key_press(): + c = sys.stdin.read(1) + if c == 'd': + done = True + elif c == 'q': + sys.exit('... calibration aborted by user') + # get measured joint values + p = self.arm.measured_jp() + if p[swing_joint] > self.max: + self.max = p[swing_joint] + elif p[swing_joint] < self.min: + self.min = p[swing_joint] + # display current range + sys.stdout.write('\rRange[%02.2f, %02.2f]' % (math.degrees(self.min), math.degrees(self.max))) + sys.stdout.flush() + # sleep + time.sleep(self.expected_interval) + finally: + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings) + print('') + + # direct joint control example + def calibrate_third_joint(self, swing_joint): + print('\nAdjusting translation offset\nPress the keys "+" (or "=") and "-" or ("_") to adjust the depth until the axis 5 is mostly immobile (one can use a camera to look at the point)\n - press "d" when you\'re done\n - press "q" to abort\n') + # move to max position as starting point + initial_joint_position = numpy.copy(self.arm.setpoint_jp()) + goal = numpy.copy(self.arm.setpoint_jp()) + goal.fill(0) + goal[swing_joint] = self.max + goal[2] = self.q2 # to start close to expected RCM + if swing_joint == 0: + goal[3] = math.radians(90.0) # so axis is facing user + else: + goal[3] = math.radians(0.0) + + self.arm.move_jp(goal).wait() + + # parameters to move back and forth + cos_ratio = (self.max - self.min) / 2.0 + + # termios settings + old_settings = termios.tcgetattr(sys.stdin) + correction = 0.0 + try: + tty.setcbreak(sys.stdin.fileno()) + start = time.time() + done = False + while not done: + # process key + if is_there_a_key_press(): + c = sys.stdin.read(1) + if c == 'd': + done = True + elif c == 'q': + sys.exit('... calibration aborted by user') + elif c == '-' or c == '_': + correction = correction - 0.0001 + elif c == '+' or c == '=': + correction = correction + 0.0001 + # move back and forth + dt = time.time() - start + t = dt / 2.0 + goal[swing_joint] = self.max + cos_ratio * (math.cos(t) - 1.0) + goal[2] = self.q2 + correction + self.arm.servo_jp(goal) + # display current offset + sys.stdout.write('\rCorrection = %02.2f mm' % (correction * 1000.0)) + sys.stdout.flush() + # sleep + time.sleep(self.expected_interval) + finally: + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings) + print('') + + # now save the new offset + if self.analog_potentiometers: + oldOffset = float(self.xmlPot.get('Offset')) / 1000.0 # convert from XML (mm) to m + newOffset = oldOffset - correction # add in meters + self.xmlPot.set('Offset', str(newOffset * 1000.0)) # convert from m to XML (mm) + os.rename(self.config_file, self.config_file + '-backup') + self.tree.write(self.config_file) + print('Old offset: {:2.2f}mm\nNew offset: {:2.2f}mm\n'.format(oldOffset * 1000.0, newOffset * 1000.0)) + print('Results saved in {}. 
Restart your dVRK application to use the new file!'.format(self.config_file)) + print('Old file saved as {}-backup.'.format(self.config_file)) + else: + print('need to correct all in lookup table') + + # main method + def run(self, swing_joint): + self.home() + self.find_range(swing_joint) + self.calibrate_third_joint(swing_joint) + +if __name__ == '__main__': + # extract ros arguments (e.g. __ns:= for namespace) + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name + + # parse arguments + parser = argparse.ArgumentParser() + parser.add_argument('-a', '--arm', type=str, required=True, + choices=['PSM1', 'PSM2', 'PSM3'], + help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') + parser.add_argument('-i', '--interval', type=float, default=0.01, + help = 'expected interval in seconds between messages sent by the device') + parser.add_argument('-c', '--config', type=str, required=True, + help = 'arm IO config file, i.e. something like sawRobotIO1394-xwz-12345.xml') + parser.add_argument('-s', '--swing-joint', type=int, required=False, + choices=[0, 1], default=0, + help = 'joint use for the swing motion around RCM') + args = parser.parse_args(argv) + + print ('\nThis program can be used to improve the potentiometer offset for the third joint ' + 'of the PSM arm (translation stage). The goal is increase the absolute accuracy of the PSM.\n' + 'The main idea is to position a known point on the tool where the RCM should be. ' + 'If the calibration is correct, the point shouldn\'t move while the arm is rocking from left to right. ' + 'For this application we\'re going to use the axis of the first joint of the wrist, i.e. the first joint ' + 'at the end of the tool shaft. To perform this calibration you need to remove the canulla otherwise you won\'t see' + ' the RCM point. One simple way to track the motion is to use a camera and place the cursor where the axis is.\n\n' + 'You must first home your PSM and make sure a tool is engaged. ' + 'Once this is done, there are two steps:\n' + ' -1- find a safe range of motion for the rocking movement\n' + ' -2- adjust the depth so that the first hinge on the tool wrist is as close as possible to the RCM.\n\n') + + ral = crtk.ral('dvrk_calibrate_potentiometer_psm') + application = calibration_psm(ral, args.arm, args.config, args.interval) + ral.spin_and_execute(application.run, args.swing_joint) diff --git a/dvrk_python/scripts/dvrk_calibrate_potentiometer_psm_cv.py b/dvrk_python/scripts/dvrk_calibrate_potentiometer_psm_cv.py new file mode 100755 index 00000000..11b40099 --- /dev/null +++ b/dvrk_python/scripts/dvrk_calibrate_potentiometer_psm_cv.py @@ -0,0 +1,421 @@ +#!/usr/bin/env python + +# Authors: Anton Deguet, Brendan Burkhart +# Date: 2015-02-22 + +# (C) Copyright 2015-2023 Johns Hopkins University (JHU), All Rights Reserved. + +# --- begin cisst license - do not edit --- + +# This software is provided "as is" under an open source license, with +# no warranty. The complete license can be found in license.txt and +# http://www.cisst.org/cisst/license.txt. 
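+
+# A typical invocation might look like (arm name and IO file below are
+# placeholders):
+# > rosrun dvrk_python dvrk_calibrate_potentiometer_psm_cv.py -a PSM1 -c sawRobotIO1394-PSM1-12345.xml
+# Useful optional flags, taken from the argument parser at the bottom of
+# this script: -r <degrees> provides the swing range directly and skips the
+# manual range-of-motion step, -t <seconds> changes the calibration timeout,
+# --threshold <mm> changes the convergence threshold.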
+ +# --- end cisst license --- + +# Start a single arm using +# > rosrun dvrk_robot dvrk_console_json -j + +# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py: +# > rosrun dvrk_python dvrk_arm_test.py + +import crtk +import dvrk + +import math +import sys +import time +import select +import tty +import termios +import threading +import numpy +import argparse + +import psm_calibration_cv + +import os.path +import xml.etree.ElementTree as ET + + +# for keyboard capture +def is_there_a_key_press(): + return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], []) + + +class calibration_psm_cv: + + # configuration + def __init__(self, ral, arm_name, config_file, expected_interval, timeout, convergence_threshold): + self.expected_interval = expected_interval + self.config_file = config_file + # check that the config file is good + if not os.path.exists(self.config_file): + sys.exit('Config file "{:s}" not found'.format(self.config_file)) + + # XML parsing, find current offset + self.tree = ET.parse(config_file) + root = self.tree.getroot() + + # find Robot in config file and make sure name matches + xpath_search_results = root.findall('./Robot') + if len(xpath_search_results) == 1: + xmlRobot = xpath_search_results[0] + else: + sys.exit('Can\'t find "Robot" in configuration file {:s}'.format(self.config_file)) + + if xmlRobot.get('Name') == arm_name: + serial_number = xmlRobot.get('SN') + print('Successfully found robot "{:s}", serial number {:s} in XML file'.format(arm_name, serial_number)) + robotFound = True + else: + sys.exit('Found robot "{:s}" while looking for "{:s}", make sure you\'re using the correct configuration file!'.format(xmlRobot.get('Name'), arm_name)) + + # now find the offset for joint 2, we assume there's only one result + xpath_search_results = root.findall("./Robot/Actuator[@ActuatorID='2']/AnalogIn/VoltsToPosSI") + self.xmlPot = xpath_search_results[0] + print('Potentiometer offset for joint 2 is currently: {:s}'.format(self.xmlPot.get('Offset'))) + + self.arm = dvrk.psm(ral = ral, + arm_name = arm_name, + expected_interval = expected_interval) + + # Calibration parameters + self.calibration_timeout = timeout + self.calibration_convergence_threshold = convergence_threshold*1e-3 # mm to m + + def home(self): + print('Enabling...') + if not self.arm.enable(10): + sys.exit('failed to enable within 10 seconds') + print('Homing...') + if not self.arm.home(10): + sys.exit('failed to home within 10 seconds') + + # get current joints just to set size + print('Moving to zero position...') + goal = numpy.copy(self.arm.setpoint_jp()) + goal.fill(0) + self.arm.move_jp(goal).wait() + self.arm.jaw.move_jp(numpy.array([0.0])).wait() + # identify depth for tool j5 using forward kinematics + cp = self.arm.forward_kinematics(numpy.array([0.0, 0.0, 0.0, 0.0])) + self.q2 = cp.p.z() + print('Depth required to position O5 on RCM point: {0:4.2f}mm'.format(self.q2 * 1000.0)) + + + # get safe range of motion from user + def find_range(self): + print("Finding range of motion for joint {}".format(self.swing_joint)) + print("Move the arm manually (pressing the clutch) to find the maximum range of motion for the first joint (left to right motion).") + print(" - press 'd' when you're done") + print(" - press 'q' to abort") + + self.min = math.radians( 180.0) + self.max = math.radians(-180.0) + + self.done = False + + # termios settings + old_settings = termios.tcgetattr(sys.stdin) + try: + tty.setcbreak(sys.stdin.fileno()) + while True: + if 
is_there_a_key_press(): + char = sys.stdin.read(1) + if char == 'd': + self.done = True + elif char == 'q': + self.ok = False + + if not self.ok: + sys.exit("\n\nCalibration aborted by user") + + if self.done: + break + + # get measured joint values + pose = self.arm.measured_jp() + self.max = max(pose[self.swing_joint], self.max) + self.min = min(pose[self.swing_joint], self.min) + + # display current range + sys.stdout.write("\rRange: [{:02.2f}, {:02.2f}]".format(math.degrees(self.min), math.degrees(self.max))) + sys.stdout.flush() + + # sleep + time.sleep(self.expected_interval) + + # restrict range to center it at 0 + angle = min(math.fabs(self.min), math.fabs(self.max)) + self.min = -max(angle, self.min) + self.max = min(angle, self.max) + print("\nRange to be used: [{:02.2f}, {:02.2f}]".format(math.degrees(self.min), math.degrees(self.max))) + # pass range of motion to vision tracking + self.tracker.set_motion_range(self.max - self.min) + + finally: + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings) + print('') + + + # Move arm to middle of motion range and position joint 05 near RCM + def move_to_start(self): + goal_pose = numpy.copy(self.arm.setpoint_jp()) + goal_pose.fill(0) + goal_pose[self.swing_joint] = self.min + 0.5 * (self.max - self.min) + goal_pose[2] = self.q2 # to start close to expected RCM + + # rotate so vision target (joint 05) is facing user/camera + if self.swing_joint == 0: + goal_pose[3] = math.radians(90.0) + else: + goal_pose[3] = math.radians(0.0) + + # move to starting position + self.arm.move_jp(goal_pose).wait() + return goal_pose + + + # get camera-relative target position at two arm poses to + # establish camera orientation and scale (pixel to meters ratio) + # exploratory_range is distance in meters to move arm + def get_camera_jacobian(self, exploratory_range=0.016): + print("\n\nMeasuring the orientation/scale of the camera") + goal_pose = self.move_to_start() + + # move arm down from RCM by half of exploratory range + goal_pose[2] = self.q2 + 0.5*exploratory_range + self.arm.move_jp(goal_pose).wait() + + # get camera position of target (joint 05) + print('Please click the target on the screen to aid target acquisition') + ok, point_one = self.tracker.acquire_point() + if not ok: + print("Camera measurement failed") + return False, None + + # slow down speed of generated trajectories - helps CV not to lose track of target + self.arm.trajectory_j_set_ratio(0.05) + + # move arm up from RCM by half + goal_pose[2] = self.q2 - 0.5*exploratory_range + self.arm.move_jp(goal_pose).wait() + + # restore normal trajectory speed + self.arm.trajectory_j_set_ratio(1.0) + + # get camera position of target again + ok, point_two = self.tracker.acquire_point() + if not ok: + print("Camera measurement failed") + return False, None + + # use difference in points with known physical difference and direction + # to measure orientation/scale of camera + point_difference = point_one - point_two + scale = exploratory_range/numpy.linalg.norm(point_difference) + orientation = point_difference/numpy.linalg.norm(point_difference) + jacobian = scale*orientation + + print("Camera measurement completed successfully") + return True, jacobian + + # called by vision tracking whenever a good estimate of the current RCM offset is obtained + # return value indicates whether arm was moved along calibration axis + def update_correction(self, rcm_offset): + self.residual_error = numpy.dot(rcm_offset, self.jacobian) + # slow convergence at 0.25 mm + convergence_rate = 0.325 if 
math.fabs(self.residual_error) < 0.00025 else 1.0 + + # Move at most 5 mm (0.005 m) in direction of estimated RCM + correction_delta = math.copysign(min(math.fabs(convergence_rate*self.residual_error), 0.005), self.residual_error) + print("Estimated remaining calibration error: {:0.2f} mm".format(1000*self.residual_error)) + + # Limit total correction to 20 mm + if abs(self.correction + correction_delta) > 0.020: + print("Can't exceed 20 mm correction, please manually improve calibration first!") + return + + self.correction += correction_delta + + # Adjust translation stage to apply new calibration correction + def apply_correction(self, correction): + # stop tracking rcm, apply correction, re-start rcm tracking + self.tracker.stop_rcm_tracking() + self.goal_pose[2] = self.q2 + correction + self.arm.move_jp(self.goal_pose).wait() + self.tracker.clear_history() + self.tracker.rcm_tracking(self.update_correction) + + # auto-calibration routine for third joint + def calibrate_third_joint(self): + print("\n\nBeginning auto-calibration...") + + # slow down arm, move to start position, restore normal speed + self.arm.trajectory_j_set_ratio(0.05) + self.goal_pose = self.move_to_start() + self.arm.trajectory_j_set_ratio(1.0) + + print("During calibation, the target should be highlighted in magenta - if it becomes green or the highlight disappears,") + print("then the target has been lost. If this occurs, please click on it to resume calibration.") + print("Press q to stop calibration, for example if the camera is moved accidentally") + + self.correction = 0.0 + previous_correction = self.correction + self.residual_error = self.calibration_convergence_threshold+1 + self.tracker.clear_history() + self.tracker.rcm_tracking(self.update_correction) + + move_command_handle = None + self.start_time = time.time() + + # Swing arm back and forth until timeout, convergence, or error/user abort + old_settings = termios.tcgetattr(sys.stdin) + try: + tty.setcbreak(sys.stdin.fileno()) + self.arm.trajectory_j_set_ratio(0.15) + + # move back and forth while tracker measures calibration error + while True: + if is_there_a_key_press(): + char = sys.stdin.read(1) + if char == 'q': + self.ok = False + if not self.ok: + print("\n\nCalibration aborted by user") + pose = self.arm.measured_jp() + self.arm.move_jp(pose).wait() + return + + # thread-safe check for change in calibration correction + correction = self.correction + if correction != previous_correction: + self.apply_correction(correction) + previous_correction = correction + + # Once arm has reached end of swing, set trajectory to swing back other way + if move_command_handle is None or not move_command_handle.is_busy(): + self.goal_pose[2] = self.q2 + correction + self.goal_pose[self.swing_joint] = self.max if self.goal_pose[self.swing_joint] == self.min else self.min + move_command_handle = self.arm.move_jp(self.goal_pose) + + time_elapsed = time.time() - self.start_time + if time_elapsed.to_sec() > self.calibration_timeout: + self.tracker.stop() + print('\n\nCalibration failed to converge within {} seconds'.format(self.calibration_timeout)) + print('Try adding diffuse lighting, increasing the range of motion, or increase the timeout') + return + + if math.fabs(self.residual_error) < self.calibration_convergence_threshold: + self.tracker.stop() + print("\n\nCalibration successfully converged, with residual error of <{} mm".format(1000*self.calibration_convergence_threshold)) + break + + time.sleep(self.expected_interval) + + # Restore normal terminal 
behavior and normal arm speed + finally: + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings) + self.arm.trajectory_j_set_ratio(1.0) + + # return to start position + self.move_to_start() + + # now save the new offset + old_offset = float(self.xmlPot.get("Offset")) / 1000.0 # convert from XML (mm) to m + new_offset = old_offset - self.correction # add in meters + self.xmlPot.set("Offset", str(new_offset * 1000.0)) # convert from m to XML (mm) + self.tree.write(self.config_file + "-new") + print("Old offset: {:2.2f}mm\nNew offset: {:2.2f}mm\n".format(old_offset * 1000.0, new_offset * 1000.0)) + print("Results saved in {:s}-new.".format(self.config_file)) + print("Restart your dVRK application with the new file and make sure you re-bias the potentiometer offsets!") + print("To be safe, power off and on the dVRK PSM controller.") + print("To copy the new file over the existing one: cp {:s}-new {:s}".format(self.config_file, self.config_file)) + + # Exit key (q/ESCAPE) handler for GUI + def _on_quit(self): + self.ok = False + self.tracker.stop() + + # Enter (or 'd') handler for GUI + def _on_enter(self): + self.done = True + + # application entry point + def run(self, swing_joint, rom): + try: + self.ok = True + self.swing_joint = swing_joint + + # initialize vision tracking + self.tracker = psm_calibration_cv.RCMTracker() + self.ok = self.tracker.start(self._on_enter, self._on_quit) + if not self.ok: + return + + self.home() + + if rom == 0: + # find safe range of motion for arm + self.find_range() + else: + self.max = math.radians(rom) + self.min = math.radians(-rom) + self.tracker.set_motion_range(self.max - self.min) + + # camera orientation/scale measurement + self.ok, self.jacobian = self.get_camera_jacobian() + if not self.ok: + print("Aborting calibration") + return + + # actual calibration + self.calibrate_third_joint() + + finally: + self.tracker.stop() + +if __name__ == '__main__': + # extract ros arguments (e.g. __ns:= for namespace) + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name + + # parse arguments + parser = argparse.ArgumentParser() + parser.add_argument('-a', '--arm', type=str, required=True, + choices=['PSM1', 'PSM2', 'PSM3'], + help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') + parser.add_argument('-i', '--interval', type=float, default=0.01, + help = 'expected interval in seconds between messages sent by the device') + parser.add_argument('-r', '--range', type=float, default=0.0, + help = 'range of motion, degrees') + parser.add_argument('-c', '--config', type=str, required=True, + help = 'arm IO config file, i.e. something like sawRobotIO1394-xwz-12345.xml') + parser.add_argument('-s', '--swing-joint', type=int, required=False, + choices=[0, 1], default=0, + help = 'joint use for the swing motion around RCM') + parser.add_argument('-t', '--timeout', type=int, required=False, default=180, + help = 'calibration timeout in seconds') + parser.add_argument('--threshold', type=int, required=False, default=0.1, + help = 'calibration convergence threshold in mm') + args = parser.parse_args(argv) + + print ('\nThis program can be used to improve the potentiometer offset for the third joint ' + 'of the PSM arm (translation stage). The goal is increase the absolute accuracy of the PSM.\n' + 'The main idea is to position a known point on the tool (joint 05) where the RCM should be. ' + 'If the calibration is correct, the point shouldn\'t move while the arm is rocking from left to right. 
' + 'For this application we\'re going to use the axis of the first joint of the wrist, i.e. the first joint ' + 'at the end of the tool shaft. To perform this calibration you need to remove the canulla otherwise you won\'t see' + ' the RCM point. This tool will use the camera to automatically track the point we want to be the RCM, and attempt ' + ' to calibrate the translation joint based.\n\n' + 'You must first home your PSM and make sure a tool is engaged.\n' + 'You should also have a camera point at the end of the tool, within about 2-4 inches if possible.\n' + 'For the camera to accurately track the target joint, it must be painted with bright pink nail polish.\n' + 'Once this is done, there are three steps:\n\n' + ' -1- find a safe range of motion for the rocking movement\n' + ' -2- detemine orientation/scale of camera relative to PSM\n' + ' -3- monitor the application while auto-calibration is performed for safety.\n\n') + + ral = crtk.ral('dvrk_calibrate_potentiometer_psm_cv') + application = calibration_psm_cv(ral, args.arm, args.config, args.interval, args.timeout, args.threshold) + ral.spin_and_execute(application.run, args.swing_joint, args.range) diff --git a/dvrk_python/scripts/dvrk_calibrate_potentiometers.py b/dvrk_python/scripts/dvrk_calibrate_potentiometers.py new file mode 100755 index 00000000..29a59e89 --- /dev/null +++ b/dvrk_python/scripts/dvrk_calibrate_potentiometers.py @@ -0,0 +1,359 @@ +#!/usr/bin/env python + +# Authors: Nick Eusman, Anton Deguet +# Date: 2015-09-24 +# Copyright JHU 2015-2023 + +import time +import math +import sys +import csv +import datetime +import numpy +import argparse +import os.path +import os + +import dvrk +import crtk + +import xml.etree.ElementTree as ET + +# for actuator_to_joint_position +import cisst_msgs.srv + +if sys.version_info.major < 3: + input = raw_input + + +def slope(x, y): + a = [] + for i in range(len(x)): + a.append(x[i] * y[i]) + sum_a = sum(a) + final_a = (sum_a * len(x)) + final_b = (sum(x) * sum(y)) + + c_squared = [] + for i in x: + c_squared.append(i**2) + + c_sum = sum(c_squared) + final_c = (c_sum * len(x)) + final_d = (sum(x)**2) + + result = (final_a - final_b) / (final_c - final_d) + return result + +class potentiometer_calibration: + + # class to contain measured_js + class __sensor: + def __init__(self, ral, expected_interval): + self.__crtk_utils = crtk.utils(self, ral, expected_interval) + self.__crtk_utils.add_measured_js() + + + def __init__(self, ral, arm_name, expected_interval = 0.01): + self.serial_number = "" + self.expected_interval = expected_interval + self.ros_namespace = arm_name + # Create the dVRK python ROS client + self.arm = dvrk.arm(ral = ral, arm_name = arm_name, expected_interval = expected_interval) + self.potentiometers = self.__sensor(ral.create_child(arm_name + '/io/pot'), expected_interval) + self.encoders = self.__sensor(ral.create_child(arm_name + '/io/actuator'), expected_interval) + self.arm.check_connections() + + + def run(self, calibration_type, filename): + nb_joint_positions = 20 # number of positions between limits + nb_samples_per_position = 250 # number of values collected at each position + total_samples = nb_joint_positions * nb_samples_per_position + samples_so_far = 0 + + sleep_time_after_motion = 0.5 # time after motion from position to position to allow potentiometers to stabilize + sleep_time_between_samples = self.expected_interval * 2.0 # time between two samples read (potentiometers) + + encoders = [] + potentiometers = [] + range_of_motion_joint = [] + + 
average_encoder = [] # all encoder values collected at a sample position used for averaging the values of that position + average_potentiometer = [] # all potentiometer values collected at a sample position used for averaging the values of that position + d2r = math.pi / 180.0 # degrees to radians + r2d = 180.0 / math.pi # radians to degrees + + slopes = [] + offsets = [] + average_offsets = [] + + # Looking in XML assuming following tree structure + # config > Robot> Actuator > AnalogIn > VoltsToPosSI > Scale = ____ or Offset = ____ + xmlVoltsToPosSI = {} + + # Make sure file exists + if not os.path.exists(filename): + print('Config file "{}" not found'.format(filename)) + return + + tree = ET.parse(filename) + root = tree.getroot() + + # Find Robot in config file and make sure name matches + xpath_search_results = root.findall('./Robot') + if len(xpath_search_results) == 1: + xmlRobot = xpath_search_results[0] + else: + print('Can\'t find "Robot" in configuration file') + return + + if xmlRobot.get('Name') == self.ros_namespace: + self.serial_number = xmlRobot.get('SN') + print('Successfully found robot "{}", serial number {} in XML file'.format(self.ros_namespace, self.serial_number)) + robotFound = True + else: + print('Found robot "{}" while looking for "{}", make sure you\'re using the correct configuration file!'.format(xmlRobot.get('Name'), self.ros_namespace)) + return + + # Look for all actuators/VoltsToPosSI + xpath_search_results = root.findall('./Robot/Actuator') + for actuator in xpath_search_results: + actuatorId = int(actuator.get('ActuatorID')) + voltsToPosSI = actuator.find('./AnalogIn/VoltsToPosSI') + xmlVoltsToPosSI[actuatorId] = voltsToPosSI + + # set joint limits and number of axis based on arm type, using robot name + if ('').join(list(self.ros_namespace)[:-1]) == 'PSM': #checks to see if the robot being tested is a PSM + arm_type = 'PSM' + lower_joint_limits = [-60.0 * d2r, -30.0 * d2r, 0.005, -170.0 * d2r, -170.0 * d2r, -170.0 * d2r, -170.0 * d2r] + upper_joint_limits = [ 60.0 * d2r, 30.0 * d2r, 0.235, 170.0 * d2r, 170.0 * d2r, 170.0 * d2r, 170.0 * d2r] + nb_axis = 7 + elif self.ros_namespace == 'MTML': + arm_type = 'MTM' + lower_joint_limits = [-15.0 * d2r, -10.0 * d2r, -10.0 * d2r, -180.0 * d2r, -80.0 * d2r, -40.0 * d2r, -100.0 * d2r] + upper_joint_limits = [ 35.0 * d2r, 20.0 * d2r, 10.0 * d2r, 80.0 * d2r, 160.0 * d2r, 40.0 * d2r, 100.0 * d2r] + nb_axis = 7 + elif self.ros_namespace == 'MTMR': + arm_type = 'MTM' + lower_joint_limits = [-30.0 * d2r, -10.0 * d2r, -10.0 * d2r, -80.0 * d2r, -80.0 * d2r, -40.0 * d2r, -100.0 * d2r] + upper_joint_limits = [ 15.0 * d2r, 20.0 * d2r, 10.0 * d2r, 180.0 * d2r, 160.0 * d2r, 40.0 * d2r, 100.0 * d2r] + nb_axis = 7 + elif self.ros_namespace == 'ECM': + arm_type = 'ECM' + lower_joint_limits = [-60.0 * d2r, -40.0 * d2r, 0.005, -80.0 * d2r] + upper_joint_limits = [ 60.0 * d2r, 40.0 * d2r, 0.230, 80.0 * d2r] + nb_axis = 4 + + # resize all arrays + for axis in range(nb_axis): + encoders.append([]) + offsets.append([]) + potentiometers.append([]) + average_encoder.append([]) + average_offsets.append([]) + average_potentiometer.append([]) + range_of_motion_joint.append(math.fabs(upper_joint_limits[axis] - lower_joint_limits[axis])) + + # Check that everything is working + try: + time.sleep(20.0 * self.expected_interval) + self.potentiometers.measured_jp() + except: + print('It seems the console for {} is not started or is not publishing the IO topics'.format(self.ros_namespace)) + print('Make sure you use "ros2 run dvrk_robot 
dvrk_console_json" with -i ros-io-{}.json'.format(self.ros_namespace)) + print('Start the dvrk_console_json with the proper options first') + return + + + print('The serial number found in the XML file is: {}'.format(self.serial_number)) + print('Make sure the dvrk_console_json is using the same configuration file. Serial number can be found in GUI tab "IO".') + ok = input('Press "c" and [enter] to continue\n') + if ok != 'c': + print('Quitting') + return + + ######## scale calibration + now = datetime.datetime.now() + now_string = now.strftime('%Y-%m-%d-%H:%M') + + if calibration_type == 'scales': + + print('Calibrating scales using encoders as reference') + + # write all values to csv file + csv_file_name = 'pot_calib_scales_' + self.ros_namespace + '-' + self.serial_number + '-' + now_string + '.csv' + print('Values will be saved in: {}'.format(csv_file_name)) + f = open(csv_file_name, 'w') + writer = csv.writer(f) + header = [] + for axis in range(nb_axis): + header.append('potentiometer' + str(axis)) + for axis in range(nb_axis): + header.append('encoder' + str(axis)) + writer.writerow(header) + + # messages + input('To start with some initial values, you first need to "home" the robot. When homed, press [enter]\n') + + if arm_type == 'PSM': + input('Since you are calibrating a PSM, make sure there is no instrument inserted. Please remove instrument, sterile adapter or calibration plate if any and press [enter]\n') + if arm_type == 'ECM': + input('Since you are calibrating an ECM, remove the endoscope and press [enter]\n') + input('The robot will make LARGE MOVEMENTS, please hit [enter] to continue once it is safe to proceed\n') + + for position in range(nb_joint_positions): + # create joint goal + joint_goal = [] + for axis in range(nb_axis): + joint_goal.append(lower_joint_limits[axis] + position * (range_of_motion_joint[axis] / nb_joint_positions)) + average_encoder[axis] = [] + average_potentiometer[axis] = [] + + # move and sleep + self.arm.move_jp(numpy.array(joint_goal)).wait() + time.sleep(sleep_time_after_motion) + + # collect nb_samples_per_position at current position to compute average + for sample in range(nb_samples_per_position): + last_pot = self.potentiometers.measured_jp() + last_enc = self.encoders.measured_jp() + for axis in range(nb_axis): + average_potentiometer[axis].append(last_pot[axis]) + average_encoder[axis].append(last_enc[axis]) + # log data + writer.writerow(last_pot + last_enc) + time.sleep(sleep_time_between_samples) + samples_so_far = samples_so_far + 1 + sys.stdout.write('\rProgress %02.1f%%' % (float(samples_so_far) / float(total_samples) * 100.0)) + sys.stdout.flush() + + # compute averages + for axis in range(nb_axis): + potentiometers[axis].append(math.fsum(average_potentiometer[axis]) / nb_samples_per_position) + encoders[axis].append(math.fsum(average_encoder[axis]) / nb_samples_per_position) + + + # at the end, return to home position + if arm_type == 'PSM' or arm_type == 'MTM': + self.arm.move_jp(numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])).wait() + elif arm_type == 'ECM': + self.arm.move_jp(numpy.array([0.0, 0.0, 0.0, 0.0])).wait() + + # close file + f.close() + + + ######## offset calibration + if calibration_type == 'offsets': + + print('Calibrating offsets') + + # write all values to csv file + csv_file_name = 'pot_calib_offsets_' + self.ros_namespace + '-' + self.serial_number + '-' + now_string + '.csv' + print('Values will be saved in: ', csv_file_name) + f = open(csv_file_name, 'w') + writer = csv.writer(f) + header = [] + for 
axis in range(nb_axis): + header.append('potentiometer' + str(axis)) + writer.writerow(header) + + # messages + print('Please home the robot first. Then you can either power off the robot and hold/clamp your arm in zero position or you can keep the robot on and move it to the physical zero position (e.g. using the arm widget/direct-control)') + if arm_type == 'PSM': + print('For a PSM, you need to hold at least the last 4 joints in zero position. If you don\'t have a way to constrain the first 3 joints, you can still just calibrate the last 4. This program will ask you later if you want to save all PSM joint offsets'); + input('Press [enter] to continue\n') + nb_samples = 2 * nb_samples_per_position + for sample in range(nb_samples): + last_pot = self.potentiometers.measured_jp() + for axis in range(nb_axis): + average_offsets[axis].append(last_pot[axis] * r2d) + writer.writerow(last_pot) + time.sleep(sleep_time_between_samples) + sys.stdout.write('\rProgress %02.1f%%' % (float(sample) / float(nb_samples) * 100.0)) + sys.stdout.flush() + for axis in range(nb_axis): + offsets[axis] = (math.fsum(average_offsets[axis]) / (nb_samples) ) + + print('') + + + if calibration_type == 'scales': + print('index | old scale | new scale | correction') + for index in range(nb_axis): + # find existing values + oldScale = float(xmlVoltsToPosSI[index].get('Scale')) + # compute new values + correction = slope(encoders[index], potentiometers[index]) + newScale = oldScale / correction + + # display + print(' %d | % 04.6f | % 04.6f | % 04.6f' % (index, oldScale, newScale, correction)) + # replace values + xmlVoltsToPosSI[index].set('Scale', str(newScale)) + + if calibration_type == 'offsets': + # convert offsets to joint space + a_to_j_service = ral.service_client(self.ros_namespace + '/actuator_to_joint_position', cisst_msgs.srv.ConvertFloat64Array) + request = cisst_msgs.srv.ConvertFloat64ArrayRequest() + request.input = offsets + response = a_to_j_service(request) + offsets = response.output + + newOffsets = [] + print('index | old offset | new offset | correction') + for index in range(nb_axis): + # find existing values + oldOffset = float(xmlVoltsToPosSI[index].get('Offset')) + # compute new values + newOffsets.append(oldOffset - offsets[index]) + + # display + print(' %d | % 04.6f | % 04.6f | % 04.6f ' % (index, oldOffset, newOffsets[index], offsets[index])) + + if arm_type == 'PSM': + all = input('Do you want to save all joint offsets or just the last 4, press "a" followed by [enter] to save all\n'); + if all == 'a': + print('This program will save ALL new PSM offsets') + for axis in range(nb_axis): + # replace values + xmlVoltsToPosSI[axis].set('Offset', str(newOffsets[axis])) + else: + print('This program will only save the last 4 PSM offsets') + for axis in range(3, nb_axis): + # replace values + xmlVoltsToPosSI[axis].set('Offset', str(newOffsets[axis])) + else: + for axis in range(nb_axis): + # replace values + xmlVoltsToPosSI[axis].set('Offset', str(newOffsets[axis])) + + save = input('To save this, press "y" followed by [enter]\n') + if save == 'y': + os.rename(filename, filename + '-backup') + tree.write(filename) + print('Results saved in {}. Restart your dVRK application to use the new file!'.format(filename)) + print('Old file saved as {}-backup.'.format(filename)) + else: + print('Results not saved!') + + +if __name__ == '__main__': + # extract ros arguments (e.g. 
__ns:= for namespace) + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name + + # parse arguments + parser = argparse.ArgumentParser() + parser.add_argument('-t', '--type', type=str, required=True, + choices=['scales', 'offsets'], + help = 'use either "scales" or "offsets"') + parser.add_argument('-a', '--arm', type=str, required=True, + choices=['ECM', 'MTML', 'MTMR', 'PSM1', 'PSM2', 'PSM3'], + help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') + parser.add_argument('-c', '--config', type=str, required=True, + help = 'arm IO config file, i.e. something like sawRobotIO1394-xwz-12345.xml') + args = parser.parse_args(argv) + + ral = crtk.ral('dvrk_calibrate_potentiometers') + application = potentiometer_calibration(ral, args.arm) + ral.spin_and_execute(application.run, args.type, args.config) diff --git a/dvrk_python/scripts/dvrk_console_test.py b/dvrk_python/scripts/dvrk_console_test.py new file mode 100755 index 00000000..3ddf54e9 --- /dev/null +++ b/dvrk_python/scripts/dvrk_console_test.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python + +# Author: Anton Deguet +# Date: 2015-02-22 + +# (C) Copyright 2015-2023 Johns Hopkins University (JHU), All Rights Reserved. + +# --- begin cisst license - do not edit --- + +# This software is provided "as is" under an open source license, with +# no warranty. The complete license can be found in license.txt and +# http://www.cisst.org/cisst/license.txt. + +# --- end cisst license --- + +# Start a single arm using +# > rosrun dvrk_robot dvrk_console_json -j + +# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py: +# > rosrun dvrk_python dvrk_arm_test.py -a + +import argparse +import sys +import time +import crtk +import dvrk + +# example of application using console.py +class example_application: + + # configuration + def __init__(self, ral): + print('configuring dvrk_console_test') + self.ral = ral + self.console = dvrk.console(ral = ral) + self.ral.check_connections() + + # main method + def run(self): + print('powering off') + self.console.power_off() + input('press [enter] once the dVRK console is powered off') + + print('powering') + self.console.power_on() + input('press [enter] once the dVRK console is powered on') + + print('homing') + self.console.home() + input('press [enter] once all arms are homed') + + print('starting teleoperation') + self.console.teleop_start() + input('press [enter] once teleoperation is turned on') + + print('stopping teleoperation') + self.console.teleop_stop() + input('press [enter] once teleoperation is turned off') + + current_scale = self.console.teleop_get_scale() + if current_scale > 0.5: + new_scale = round(current_scale - 0.1, 2) + else: + new_scale = round(current_scale + 0.1, 2) + print(f'current teleoperation scale is {current_scale}, setting it to {new_scale}') + self.console.teleop_set_scale(new_scale) + input('press [enter] once the scale has been changed in console GUI') + current_scale = self.console.teleop_get_scale() + print(f'current teleoperation scale is {current_scale}, expected {new_scale}') + +if __name__ == '__main__': + # extract ros arguments (e.g. 
__ns:= for namespace) + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name + + ral = crtk.ral('dvrk_console_test') + application = example_application(ral) + ral.spin_and_execute(application.run) diff --git a/dvrk_robot/scripts/dvrk_ecm_gc_collect.py b/dvrk_python/scripts/dvrk_ecm_psm_gc_collect.py similarity index 62% rename from dvrk_robot/scripts/dvrk_ecm_gc_collect.py rename to dvrk_python/scripts/dvrk_ecm_psm_gc_collect.py index fca13dba..5e1d4f4b 100755 --- a/dvrk_robot/scripts/dvrk_ecm_gc_collect.py +++ b/dvrk_python/scripts/dvrk_ecm_psm_gc_collect.py @@ -2,28 +2,35 @@ # Authors: Anton Deguet # Date: 2018-09-10 -# Copyright JHU 2010 +# Copyright JHU 2018-2023 -# Todo: -# - test calibrating 3rd offset on PSM? +# Simple script to collect data on ECM Classic for gravity +# compensation identification. + +# This scripts needs to be use different joints limits for different +# arms, i.e. Classic vs Si, PSM vs ECM. import time -import rospy import math import sys import csv import datetime +import crtk import dvrk import numpy +import argparse -def dvrk_ecm_gc_collect(robot_name): +def dvrk_ecm_psm_gc_collect(ral, name, expected_interval): # create dVRK robot - robot = dvrk.ecm(robot_name) + robot = dvrk.ecm(ral = ral, + arm_name = name, + expected_interval = expected_interval) + ral.check_connections() # file to save data now = datetime.datetime.now() now_string = now.strftime("%Y-%m-%d-%H-%M") - csv_file_name = 'ecm-gc-' + now_string + '.csv' + csv_file_name = name + '-gc-' + now_string + '.csv' print("Values will be saved in: ", csv_file_name) f = open(csv_file_name, 'w') writer = csv.writer(f) @@ -78,7 +85,17 @@ def dvrk_ecm_gc_collect(robot_name): robot.move_jp(numpy.array([0.0, 0.0, 0.0, 0.0])).wait() if __name__ == '__main__': - if (len(sys.argv) != 2): - print('%s requires arm name, e.g. ECM' % (sys.argv[0])) - else: - dvrk_ecm_gc_collect(sys.argv[1]) + # extract ros arguments (e.g. __ns:= for namespace) + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name + + # parse arguments + parser = argparse.ArgumentParser() + parser.add_argument('-a', '--arm', type=str, required=True, + choices=['ECM', 'PSM1', 'PSM2', 'PSM3'], + help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') + parser.add_argument('-i', '--interval', type=float, default=0.01, + help = 'expected interval in seconds between messages sent by the device') + args = parser.parse_args(argv) + + ral = crtk.ral('dvrk_ecm_psm_gc_collect') + ral.spin_and_execute(dvrk_ecm_psm_gc_collect, ral, args.arm, args.interval) diff --git a/dvrk_python/scripts/dvrk_move_wait_test.py b/dvrk_python/scripts/dvrk_move_wait_test.py index a0f4e915..bacbc5ef 100755 --- a/dvrk_python/scripts/dvrk_move_wait_test.py +++ b/dvrk_python/scripts/dvrk_move_wait_test.py @@ -15,93 +15,99 @@ # Start a single arm using # > rosrun dvrk_robot dvrk_console_json -j +# Run test script: +# > rossun dvrk_python dvrk_move_wait_test.py -a -# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py: -# > rosrun dvrk_python dvrk_arm_test.py - +import argparse +import crtk import dvrk import math +import numpy import sys import time -import rospy -import numpy -import PyKDL -import argparse -if sys.version_info.major < 3: - input = raw_input -# ros init node so we can use default ros arguments (e.g. 
__ns:= for namespace) -rospy.init_node('dvrk_move_wait_test', anonymous = True) +def main(ral, arm_name, expected_interval): + arm = dvrk.arm(ral = ral, + arm_name = arm_name, + expected_interval = expected_interval) -# strip ros arguments -argv = rospy.myargv(argv=sys.argv) + ral.check_connections() -# parse arguments -parser = argparse.ArgumentParser() -parser.add_argument('-a', '--arm', type=str, required=True, - choices=['ECM', 'MTML', 'MTMR', 'PSM1', 'PSM2', 'PSM3'], - help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') -parser.add_argument('-i', '--interval', type=float, default=0.01, - help = 'expected interval in seconds between messages sent by the device') -args = parser.parse_args(argv[1:]) # skip argv[0], script name + if not arm.enable(10): + sys.exit('failed to enable within 10 seconds') + if not arm.home(10): + sys.exit('failed to enable within 10 seconds') + + print('starting move_jp') + + # get current position + initial_joint_position = numpy.copy(arm.setpoint_jp()) + amplitude = math.radians(10.0) + goal = numpy.copy(initial_joint_position) -arm = dvrk.arm(arm_name = args.arm, - expected_interval = args.interval) + print('--> Testing the trajectory with wait()') -print('starting move_jp') + start_time = time.time() -# get current position -initial_joint_position = numpy.copy(arm.setpoint_jp()) -amplitude = math.radians(10.0) -goal = numpy.copy(initial_joint_position) + # first motion + goal[0] = initial_joint_position[0] + amplitude + arm.move_jp(goal).wait() -print('--> Testing the trajectory with wait()') + # second motion + goal[0] = initial_joint_position[0] - amplitude + arm.move_jp(goal).wait() -start_time = time.time() + # back to initial position + arm.move_jp(initial_joint_position).wait() + print('--> Time for the full trajectory: %f seconds' % (time.time() - start_time)) -# first motion -goal[0] = initial_joint_position[0] + amplitude -arm.move_jp(goal).wait() + print('--> Testing the trajectory with busy loop') + start_time = time.time() -# second motion -goal[0] = initial_joint_position[0] - amplitude -arm.move_jp(goal).wait() + # first motion + goal[0] = initial_joint_position[0] + amplitude + counter = 0 -# back to initial position -arm.move_jp(initial_joint_position).wait() -print('--> Time for the full trajectory: %f seconds' % (time.time() - start_time)) + handle = arm.move_jp(goal) + while handle.is_busy(): + counter = counter + 1 + sys.stdout.write('\r---> Loop counter: %d' % (counter)) + sys.stdout.flush() + # second motion + goal[0] = initial_joint_position[0] - amplitude + handle = arm.move_jp(goal) + while handle.is_busy(): + counter = counter + 1 + sys.stdout.write('\r---> Loop counter: %d' % (counter)) + sys.stdout.flush() -print('--> Testing the trajectory with busy loop') -start_time = time.time() + # back to initial position + handle = arm.move_jp(initial_joint_position) + while handle.is_busy(): + counter = counter + 1 + sys.stdout.write('\r---> Loop counter: %d' % (counter)) + sys.stdout.flush() -# first motion -goal[0] = initial_joint_position[0] + amplitude -counter = 0 + print('') + print('--> Time for the full trajectory: %f seconds' % (time.time() - start_time)) -handle = arm.move_jp(goal) -while handle.is_busy(): - counter = counter + 1 - sys.stdout.write('\r---> Loop counter: %d' % (counter)) - sys.stdout.flush() + print('--> You can change the trajectory velocity in the GUI using "%s", "Direct control" and lower the "100%%" factor. Then re-run this program.' 
% (args.arm)) -# second motion -goal[0] = initial_joint_position[0] - amplitude -handle = arm.move_jp(goal) -while handle.is_busy(): - counter = counter + 1 - sys.stdout.write('\r---> Loop counter: %d' % (counter)) - sys.stdout.flush() -# back to initial position -handle = arm.move_jp(initial_joint_position) -while handle.is_busy(): - counter = counter + 1 - sys.stdout.write('\r---> Loop counter: %d' % (counter)) - sys.stdout.flush() +if __name__ == "__main__": + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name + ral = crtk.ral('dvrk_move_wait_test') -print('') -print('--> Time for the full trajectory: %f seconds' % (time.time() - start_time)) + # parse arguments + parser = argparse.ArgumentParser() + parser.add_argument('-a', '--arm', type=str, required=True, + choices=['ECM', 'MTML', 'MTMR', 'PSM1', 'PSM2', 'PSM3'], + help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') + parser.add_argument('-i', '--interval', type=float, default=0.01, + help = 'expected interval in seconds between messages sent by the device') + args = parser.parse_args(argv) -print('--> You can change the trajectory velocity in the GUI using "%s", "Direct control" and lower the "100%%" factor. Then re-run this program.' % (args.arm)) + run = lambda: main(ral, args.arm, args.interval) + ral.spin_and_execute(run) diff --git a/dvrk_python/scripts/dvrk_mtm_cartesian_impedance.py b/dvrk_python/scripts/dvrk_mtm_cartesian_impedance.py index 1a3692b0..381a0dc9 100755 --- a/dvrk_python/scripts/dvrk_mtm_cartesian_impedance.py +++ b/dvrk_python/scripts/dvrk_mtm_cartesian_impedance.py @@ -3,7 +3,7 @@ # Author: Anton Deguet # Date: 2017-07-22 -# (C) Copyright 2017-2020 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2017-2023 Johns Hopkins University (JHU), All Rights Reserved. 
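+
+# The tests below send crtk_msgs/CartesianImpedance gains through
+# arm.servo_ci(). As a minimal sketch, with field names and sign conventions
+# taken from the tests() method below, constraining the arm to the X/Y plane
+# around its current position only requires the z stiffness/damping terms:
+#
+#   gains = CartesianImpedance()
+#   gains.force_orientation.w = 1.0    # identity orientation for the gains
+#   gains.torque_orientation.w = 1.0
+#   gains.position_negative.p.z = -200.0   # stiffness
+#   gains.position_positive.p.z = -200.0
+#   gains.position_negative.d.z = -5.0     # damping
+#   gains.position_positive.d.z = -5.0
+#   p = arm.measured_cp().p                # anchor at the current position
+#   gains.force_position.x = p[0]
+#   gains.force_position.y = p[1]
+#   gains.force_position.z = p[2]
+#   arm.servo_ci(gains)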
# --- begin cisst license - do not edit --- @@ -15,189 +15,163 @@ # Start a single arm using # > rosrun dvrk_robot dvrk_console_json -j +# Run test script: +# > rosrun dvrk_python dvrk_mtm_cartesian_impedance.py -a -# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py: -# > rosrun dvrk_python dvrk_mtm_cartesian_impedance - +import argparse +import crtk +from crtk_msgs.msg import CartesianImpedance import dvrk -import sys -import rospy import numpy -import threading -import argparse -from sensor_msgs.msg import Joy -from cisst_msgs.msg import prmCartesianImpedanceGains +import sys -# print with node id -def print_id(message): - print('%s -> %s' % (rospy.get_caller_id(), message)) -# example of application using arm.py class example_application: - - # configuration - def configure(self, robot_name, expected_interval): - print_id('configuring dvrk_mtm_cartesian_impedance for %s' % robot_name) + def __init__(self, ral, arm_name, expected_interval): + print('configuring dvrk_mtm_cartesian_impedance for {}'.format(arm_name)) + self.ral = ral self.expected_interval = expected_interval - self.arm = dvrk.mtm(arm_name = robot_name, + self.arm = dvrk.mtm(ral = ral, + arm_name = arm_name, expected_interval = expected_interval) - self.coag_event = threading.Event() - rospy.Subscriber('footpedals/coag', - Joy, self.coag_event_cb) - self.set_gains_pub = rospy.Publisher(self.arm._arm__full_ros_namespace + '/set_cartesian_impedance_gains', - prmCartesianImpedanceGains, latch = True, queue_size = 1) + self.coag = crtk.joystick_button(ral, 'footpedals/coag', 0) # homing example def home(self): - print_id('starting enable') + self.ral.check_connections() + + print('starting enable') if not self.arm.enable(10): sys.exit('failed to enable within 10 seconds') - print_id('starting home') + print('starting home') if not self.arm.home(10): sys.exit('failed to home within 10 seconds') # get current joints just to set size - print_id('move to starting position') + print('move to starting position') goal = numpy.copy(self.arm.setpoint_jp()) # go to zero position, make sure 3rd joint is past cannula goal.fill(0) self.arm.move_jp(goal).wait() - # foot pedal callback - def coag_event_cb(self, data): - if (data.buttons[0] == 1): - self.coag_event.set() - - # wait for foot pedal - def wait_for_coag(self): - self.coag_event.clear() - self.coag_event.wait(600) - - # tests def tests(self): # turn on gravity compensation self.arm.use_gravity_compensation(True) - gains = prmCartesianImpedanceGains() + gains = CartesianImpedance() # set orientation to identity quaternions - gains.ForceOrientation.w = 1.0 - gains.TorqueOrientation.w = 1.0 + gains.force_orientation.w = 1.0 + gains.torque_orientation.w = 1.0 - print_id('press COAG pedal to move to next example') + print('press and release the COAG pedal to move to next example, always hold the arm') - print_id('arm will be constrained in X/Y plane around the current position') - self.wait_for_coag() + print('arm will be constrained in X/Y plane around the current position') + self.coag.wait(value = 0) # set gains in z direction - gains.PosStiffNeg.z = -200.0 - gains.PosStiffPos.z = -200.0 - gains.PosDampingNeg.z = -5.0 - gains.PosDampingPos.z = -5.0 - gains.ForcePosition.x = self.arm.measured_cp().p[0] - gains.ForcePosition.y = self.arm.measured_cp().p[1] - gains.ForcePosition.z = self.arm.measured_cp().p[2] - self.set_gains_pub.publish(gains) - - print_id('orientation will be locked') - self.wait_for_coag() + gains.position_negative.p.z = 
-200.0 + gains.position_positive.p.z = -200.0 + gains.position_negative.d.z = -5.0 + gains.position_positive.d.z = -5.0 + gains.force_position.x = self.arm.measured_cp().p[0] + gains.force_position.y = self.arm.measured_cp().p[1] + gains.force_position.z = self.arm.measured_cp().p[2] + self.arm.servo_ci(gains) + + print('orientation will be locked') + self.coag.wait(value = 0) self.arm.lock_orientation_as_is() - print_id('arm will be constrained in X/Y half plane around the current position') - self.wait_for_coag() + print('arm will be constrained in X/Y half plane around the current position') + self.coag.wait(value = 0) # set gains in z direction, stiffer in half positive, 0 in negative - gains.PosStiffNeg.z = 0.0 - gains.PosStiffPos.z = -500.0 - gains.PosDampingNeg.z = 0.0 - gains.PosDampingPos.z = -15.0 - gains.ForcePosition.x = self.arm.measured_cp().p[0] - gains.ForcePosition.y = self.arm.measured_cp().p[1] - gains.ForcePosition.z = self.arm.measured_cp().p[2] - self.set_gains_pub.publish(gains) - - print_id('an horizontal line will be created around the current position, with viscosity along the line') - self.wait_for_coag() + gains.position_negative.p.z = 0.0 + gains.position_positive.p.z = -500.0 + gains.position_negative.d.z = 0.0 + gains.position_positive.d.z = -15.0 + gains.force_position.x = self.arm.measured_cp().p[0] + gains.force_position.y = self.arm.measured_cp().p[1] + gains.force_position.z = self.arm.measured_cp().p[2] + self.arm.servo_ci(gains) + + print('an horizontal line will be created around the current position, with viscosity along the line') + self.coag.wait(value = 0) # set gains in x, z directions for the line - gains.PosStiffNeg.x = -200.0 - gains.PosStiffPos.x = -200.0 - gains.PosDampingNeg.x = -5.0 - gains.PosDampingPos.x = -5.0 - gains.PosStiffNeg.z = -200.0 - gains.PosStiffPos.z = -200.0 - gains.PosDampingNeg.z = -5.0 - gains.PosDampingPos.z = -5.0 + gains.position_negative.p.x = -200.0 + gains.position_positive.p.x = -200.0 + gains.position_negative.d.x = -5.0 + gains.position_positive.d.x = -5.0 + gains.position_negative.p.z = -200.0 + gains.position_positive.p.z = -200.0 + gains.position_negative.d.z = -5.0 + gains.position_positive.d.z = -5.0 # viscosity along the line - gains.PosDampingNeg.y = -10.0 - gains.PosDampingPos.y = -10.0 + gains.position_negative.d.y = -10.0 + gains.position_positive.d.y = -10.0 # always start from current position to avoid jumps - gains.ForcePosition.x = self.arm.measured_cp().p[0] - gains.ForcePosition.y = self.arm.measured_cp().p[1] - gains.ForcePosition.z = self.arm.measured_cp().p[2] - self.set_gains_pub.publish(gains) + gains.force_position.x = self.arm.measured_cp().p[0] + gains.force_position.y = self.arm.measured_cp().p[1] + gains.force_position.z = self.arm.measured_cp().p[2] + self.arm.servo_ci(gains) - print_id('a plane will be created perpendicular to the master gripper') - self.wait_for_coag() + print('a plane will be created perpendicular to the master gripper') + self.coag.wait(value = 0) # set gains in x, z directions for the line - gains.PosStiffNeg.x = 0.0 - gains.PosStiffPos.x = 0.0 - gains.PosDampingNeg.x = 0.0 - gains.PosDampingPos.x = 0.0 - gains.PosStiffNeg.y = 0.0 - gains.PosStiffPos.y = 0.0 - gains.PosDampingNeg.y = 0.0 - gains.PosDampingPos.y = 0.0 - gains.PosStiffNeg.z = -200.0 - gains.PosStiffPos.z = -200.0 - gains.PosDampingNeg.z = -5.0 - gains.PosDampingPos.z = -5.0 + gains.position_negative.p.x = 0.0 + gains.position_positive.p.x = 0.0 + gains.position_negative.d.x = 0.0 + 
gains.position_positive.d.x = 0.0 + gains.position_negative.p.y = 0.0 + gains.position_positive.p.y = 0.0 + gains.position_negative.d.y = 0.0 + gains.position_positive.d.y = 0.0 + gains.position_negative.p.z = -200.0 + gains.position_positive.p.z = -200.0 + gains.position_negative.d.z = -5.0 + gains.position_positive.d.z = -5.0 stiffOri = -0.2 dampOri = -0.01 - gains.OriStiffNeg.x = stiffOri - gains.OriStiffPos.x = stiffOri - gains.OriDampingNeg.x = dampOri - gains.OriDampingPos.x = dampOri - gains.OriStiffNeg.y = stiffOri - gains.OriStiffPos.y = stiffOri - gains.OriDampingNeg.y = dampOri - gains.OriDampingPos.y = dampOri - gains.OriStiffNeg.z = 0.0 - gains.OriStiffPos.z = 0.0 - gains.OriDampingNeg.z = 0.0 - gains.OriDampingPos.z = 0.0 + gains.orientation_negative.p.x = stiffOri + gains.orientation_positive.p.x = stiffOri + gains.orientation_negative.d.x = dampOri + gains.orientation_positive.d.x = dampOri + gains.orientation_negative.p.y = stiffOri + gains.orientation_positive.p.y = stiffOri + gains.orientation_negative.d.y = dampOri + gains.orientation_positive.d.y = dampOri + gains.orientation_negative.p.z = 0.0 + gains.orientation_positive.p.z = 0.0 + gains.orientation_negative.d.z = 0.0 + gains.orientation_positive.d.z = 0.0 # always start from current position to avoid jumps - gains.ForcePosition.x = self.arm.measured_cp().p[0] - gains.ForcePosition.y = self.arm.measured_cp().p[1] - gains.ForcePosition.z = self.arm.measured_cp().p[2] + gains.force_position.x = self.arm.measured_cp().p[0] + gains.force_position.y = self.arm.measured_cp().p[1] + gains.force_position.z = self.arm.measured_cp().p[2] orientationQuaternion = self.arm.measured_cp().M.GetQuaternion() - gains.ForceOrientation.x = orientationQuaternion[0] - gains.ForceOrientation.y = orientationQuaternion[1] - gains.ForceOrientation.z = orientationQuaternion[2] - gains.ForceOrientation.w = orientationQuaternion[3] - gains.TorqueOrientation.x = orientationQuaternion[0] - gains.TorqueOrientation.y = orientationQuaternion[1] - gains.TorqueOrientation.z = orientationQuaternion[2] - gains.TorqueOrientation.w = orientationQuaternion[3] - self.set_gains_pub.publish(gains) + gains.force_orientation.x = orientationQuaternion[0] + gains.force_orientation.y = orientationQuaternion[1] + gains.force_orientation.z = orientationQuaternion[2] + gains.force_orientation.w = orientationQuaternion[3] + gains.torque_orientation.x = orientationQuaternion[0] + gains.torque_orientation.y = orientationQuaternion[1] + gains.torque_orientation.z = orientationQuaternion[2] + gains.torque_orientation.w = orientationQuaternion[3] + self.arm.servo_ci(gains) self.arm.unlock_orientation() - print_id('keep holding arm, press coag, arm will freeze in position') - self.wait_for_coag() - self.arm.move_jp(self.arm.measured_jp()).wait() - - print_id('press coag to end') - self.wait_for_coag() - + print('arm will freeze in position') + self.coag.wait(value = 0) + self.arm.hold() # main method def run(self): self.home() self.tests() -if __name__ == '__main__': - # ros init node so we can use default ros arguments (e.g. __ns:= for namespace) - rospy.init_node('dvrk_mtm_cartesian_impedance') - # strip ros arguments - argv = rospy.myargv(argv=sys.argv) + +def main(): + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name # parse arguments parser = argparse.ArgumentParser() @@ -206,8 +180,12 @@ def run(self): help = 'arm name corresponding to ROS topics without namespace. 
Use __ns:= to specify the namespace') parser.add_argument('-i', '--interval', type=float, default=0.01, help = 'expected interval in seconds between messages sent by the device') - args = parser.parse_args(argv[1:]) # skip argv[0], script name + args = parser.parse_args(argv) + + ral = crtk.ral('dvrk_mtm_cartesian_impedance') + application = example_application(ral, args.arm, args.interval) + ral.spin_and_execute(application.run) - application = example_application() - application.configure(args.arm, args.interval) - application.run() + +if __name__ == '__main__': + main() diff --git a/dvrk_python/scripts/dvrk_mtm_test.py b/dvrk_python/scripts/dvrk_mtm_test.py index 4395a3fa..e71bea19 100755 --- a/dvrk_python/scripts/dvrk_mtm_test.py +++ b/dvrk_python/scripts/dvrk_mtm_test.py @@ -3,7 +3,7 @@ # Author: Anton Deguet # Date: 2017-07-22 -# (C) Copyright 2017-2020 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2017-2023 Johns Hopkins University (JHU), All Rights Reserved. # --- begin cisst license - do not edit --- @@ -15,99 +15,81 @@ # Start a single arm using # > rosrun dvrk_robot dvrk_console_json -j +# Run test script: +# > rosrun dvrk_python dvrk_mtm_test.py -a -# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py: -# > rosrun dvrk_python dvrk_arm_test.py - +import argparse +import crtk import dvrk -import sys -import rospy import numpy -import threading -import argparse -from sensor_msgs.msg import Joy +import sys -# print with node id -def print_id(message): - print('%s -> %s' % (rospy.get_caller_id(), message)) -# example of application using arm.py class example_application: - - # configuration - def configure(self, robot_name, expected_interval): - print_id('configuring dvrk_mtm_test for %s' % robot_name) + def __init__(self, ral, arm_name, expected_interval): + print('configuring dvrk_mtm_test for {}'.format(arm_name)) + self.ral = ral self.expected_interval = expected_interval - self.arm = dvrk.mtm(arm_name = robot_name, + self.arm = dvrk.mtm(ral = ral, + arm_name = arm_name, expected_interval = expected_interval) - self.coag_event = threading.Event() - rospy.Subscriber('footpedals/coag', - Joy, self.coag_event_cb) + self.coag = crtk.joystick_button(ral, 'footpedals/coag', 0) # homing example def home(self): - print_id('starting enable') + self.ral.check_connections() + + print('starting enable') if not self.arm.enable(10): sys.exit('failed to enable within 10 seconds') - print_id('starting home') + print('starting home') if not self.arm.home(10): sys.exit('failed to home within 10 seconds') # get current joints just to set size - print_id('move to starting position') + print('move to starting position') goal = numpy.copy(self.arm.setpoint_jp()) # go to zero position, make sure 3rd joint is past cannula goal.fill(0) self.arm.move_jp(goal).wait() - # foot pedal callback - def coag_event_cb(self, data): - if data.buttons[0] == 1: - self.coag_event.set() - - # wait for foot pedal - def wait_for_coag(self): - self.coag_event.clear() - self.coag_event.wait(100000) - # tests def tests(self): # turn on gravity compensation self.arm.use_gravity_compensation(True) - print_id('press COAG pedal to move to the next test') - - print_id('arm will go limp, hold it and press coag') - self.wait_for_coag() + print('press and release the COAG pedal to move to the next example, always hold the arm') + print('arm will go limp') + self.coag.wait(value = 0) self.arm.body.servo_cf(numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])) - print_id('keep 
holding arm, press coag, a force in body frame will be applied (direction depends on wrist orientation)') - self.wait_for_coag() + print('a force in body frame will be applied (direction depends on wrist orientation)') + self.coag.wait(value = 0) self.arm.body_set_cf_orientation_absolute(False) self.arm.body.servo_cf(numpy.array([0.0, 0.0, -3.0, 0.0, 0.0, 0.0])) - print_id('keep holding arm, press coag, a force in world frame will be applied (fixed direction)') - self.wait_for_coag() + print('a force in world frame will be applied (fixed direction)') + self.coag.wait(value = 0) self.arm.body_set_cf_orientation_absolute(True) self.arm.body.servo_cf(numpy.array([0.0, 0.0, -3.0, 0.0, 0.0, 0.0])) - print_id('keep holding arm, press coag, orientation will be locked') - self.wait_for_coag() + print('orientation will be locked') + self.coag.wait(value = 0) self.arm.lock_orientation_as_is() - print_id('keep holding arm, press coag, force will be removed') - self.wait_for_coag() + print('force will be removed') + self.coag.wait(value = 0) self.arm.body.servo_cf(numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])) - print_id('keep holding arm, press coag, orientation will be unlocked') - self.wait_for_coag() + print('orientation will be unlocked') + self.coag.wait(value = 0) self.arm.unlock_orientation() - print_id('keep holding arm, press coag, arm will freeze in position') - self.wait_for_coag() - self.arm.move_jp(self.arm.measured_jp()).wait() + print('arm will freeze in position') + self.coag.wait(value = 0) + self.arm.hold() - print_id('press coag to end') - self.wait_for_coag() + print('press and release coag to end') + self.coag.wait(value = 0) # main method def run(self): @@ -115,11 +97,9 @@ def run(self): self.tests() -if __name__ == '__main__': - # ros init node so we can use default ros arguments (e.g. __ns:= for namespace) - rospy.init_node('dvrk_mtm_test') - # strip ros arguments - argv = rospy.myargv(argv=sys.argv) +def main(): + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name + ral = crtk.ral('dvrk_mtm_test') # parse arguments parser = argparse.ArgumentParser() @@ -128,8 +108,10 @@ def run(self): help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') parser.add_argument('-i', '--interval', type=float, default=0.01, help = 'expected interval in seconds between messages sent by the device') - args = parser.parse_args(argv[1:]) # skip argv[0], script name + args = parser.parse_args(argv) + + application = example_application(ral, args.arm, args.interval) + ral.spin_and_execute(application.run) - application = example_application() - application.configure(args.arm, args.interval) - application.run() +if __name__ == '__main__': + main() diff --git a/dvrk_python/scripts/dvrk_psm_ecm_registration.py b/dvrk_python/scripts/dvrk_psm_ecm_registration.py new file mode 100755 index 00000000..2bcb8c68 --- /dev/null +++ b/dvrk_python/scripts/dvrk_psm_ecm_registration.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python + +# Author: Anton Deguet +# Date: 2021-06-24 + +# (C) Copyright 2021-2023 Johns Hopkins University (JHU), All Rights Reserved. + +# --- begin cisst license - do not edit --- + +# This software is provided "as is" under an open source license, with +# no warranty. The complete license can be found in license.txt and +# http://www.cisst.org/cisst/license.txt. 
+ +# --- end cisst license --- + +import crtk +import rospy +import numpy +import PyKDL +import argparse +import sys +import math +import time + +# simplified arm class for the PSM +class simple_psm: + + # simplified jaw class to control the jaws + class __jaw_device: + def __init__(self, ral, + expected_interval, operating_state_instance): + self.__crtk_utils = crtk.utils(self, ral, + expected_interval, operating_state_instance) + self.__crtk_utils.add_servo_jf() # to control jaw effort + + def __init__(self, ral, expected_interval): + # populate this class with all the ROS topics we need + self.crtk_utils = crtk.utils(self, ral, expected_interval) + self.crtk_utils.add_operating_state() + self.crtk_utils.add_servo_jf() # to make arm limp + self.crtk_utils.add_measured_cp() # to measure actual cartesian pose + self.jaw = self.__jaw_device(ral.create_child('jaw'), + expected_interval, + operating_state_instance = self) + +# simplified arm class for the ECM +class simple_ecm: + + def __init__(self, ral, expected_interval): + # populate this class with all the ROS topics we need + self.crtk_utils = crtk.utils(self, ral, expected_interval) + self.crtk_utils.add_operating_state() + self.crtk_utils.add_move_jp() + self.crtk_utils.add_setpoint_js() + self.crtk_utils.add_move_cp() + self.crtk_utils.add_setpoint_cp() + +if sys.version_info.major < 3: + input = raw_input + +# extract ros arguments (e.g. __ns:= for namespace) +argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name + +# parse arguments +parser = argparse.ArgumentParser() +parser.add_argument('-a', '--arm', type = str, required = True, + choices = ['PSM1', 'PSM2', 'PSM3'], + help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') +parser.add_argument('-i', '--interval', type = float, default = 0.01, + help = 'expected interval in seconds between messages sent by the device') + +args = parser.parse_args(argv) + +ral = crtk.ral('dvrk_psm_ecm_registration') + +# create PSM and ECM +psm = simple_psm(ral = ral.create_child(args.arm), + expected_interval = args.interval) + +ecm = simple_ecm(ral = ral.create_child('ECM'), + expected_interval = args.interval) +ral.check_connections() +ral.spin() + +input('-> Press "Enter" to align ECM') +ecm_setpoint_jp = ecm.setpoint_jp() +ecm_setpoint_jp[0] = 0.0 +ecm_setpoint_jp[1] = 0.0 +ecm_setpoint_jp[3] = 0.0 +ecm.move_jp(ecm_setpoint_jp).wait() + +input('-> Press "Enter" to close the PSM jaw') +psm.jaw.servo_jf(numpy.array([-0.1])) + +input('-> Press "Enter" to start ECM motion') + +# ECM base to tip +ecm_t_b = PyKDL.Frame() +ecm_t_b.p = ecm.setpoint_cp().p +ecm_t_b.M = ecm.setpoint_cp().M + +goal = PyKDL.Frame() +goal.p = ecm.setpoint_cp().p +goal.M = ecm.setpoint_cp().M + +amplitude = 0.02 # 1/2 of the full motion + +# X motion +disp = PyKDL.Vector(0.0, 0.0, 0.0) +disp[0] = -amplitude +goal.p = ecm_t_b.p + ecm_t_b.M * disp +ecm.move_cp(goal).wait() +time.sleep(0.5) +psm_x0 = psm.measured_cp().p + +disp[0] = amplitude +goal.p = ecm_t_b.p + ecm_t_b.M * disp +ecm.move_cp(goal).wait() +time.sleep(0.5) +psm_x1 = psm.measured_cp().p + +psm_x = psm_x1 - psm_x0 +print('-- Amplitude of x motion: %f' % (psm_x.Normalize())) + +# Y motion +disp[0] = 0.0 +disp[1] = -amplitude +goal.p = ecm_t_b.p + ecm_t_b.M * disp +ecm.move_cp(goal).wait() +time.sleep(0.5) +psm_y0 = psm.measured_cp().p + +disp[1] = amplitude +goal.p = ecm_t_b.p + ecm_t_b.M * disp +ecm.move_cp(goal).wait() +time.sleep(0.5) +psm_y1 = psm.measured_cp().p + +psm_y = psm_y1 - psm_y0 
+print('-- Amplitude of y motion: %f' % (psm_y.Normalize())) + +# Z motion +disp[1] = 0.0 +disp[2] = -amplitude +goal.p = ecm_t_b.p + ecm_t_b.M * disp +ecm.move_cp(goal).wait() +time.sleep(0.5) +psm_z0 = psm.measured_cp().p + +disp[2] = amplitude +goal.p = ecm_t_b.p + ecm_t_b.M * disp +ecm.move_cp(goal).wait() +time.sleep(0.5) +psm_z1 = psm.measured_cp().p + +psm_z = psm_z1 - psm_z0 +print('-- Amplitude of z motion: %f' % (psm_z.Normalize())) + +# back to zero +ecm.move_cp(ecm_t_b).wait() + +# quick sanity checks, should be close-ish to 90 degrees +print ('-- Angle between x and y: %f' % (math.degrees(math.acos(PyKDL.dot(psm_x, psm_y))))) +print ('-- Angle between y and z: %f' % (math.degrees(math.acos(PyKDL.dot(psm_y, psm_z))))) +print ('-- Angle between z and x: %f' % (math.degrees(math.acos(PyKDL.dot(psm_z, psm_x))))) + +# convert to quaternion +q = PyKDL.Rotation(psm_x, psm_y, psm_z).GetQuaternion() + +# normalize +q = q / (numpy.linalg.norm(q)) + +# get normalized rotational matrix +r = PyKDL.Rotation.Quaternion(q[0], q[1], q[2], q[3]) + +# print text to copy/paste in json file +print(',\n"base-frame": {\n"reference-frame": "ECM",\n"transform": [[ %.10f, %.10f, %.10f, 0.0],\n[ %.10f, %.10f, %.10f, 0.0],\n[%.10f, %.10f, %.10f, 0.0],\n[0.0, 0.0, 0.0, 1.0]]\n}' + % ( r[0,0], r[0,1], r[0,2], + r[1,0], r[1,1], r[1,2], + r[2,0], r[2,1], r[2,2])) + +input('-> Press "Enter" to release the PSM jaw') +psm.jaw.servo_jf(numpy.array([0.0])) + +ral.shutdown() diff --git a/dvrk_python/scripts/dvrk_psm_effort_test.py b/dvrk_python/scripts/dvrk_psm_effort_test.py index 8924b69b..dd7f4cf3 100755 --- a/dvrk_python/scripts/dvrk_psm_effort_test.py +++ b/dvrk_python/scripts/dvrk_psm_effort_test.py @@ -3,7 +3,7 @@ # Author: Anton Deguet # Date: 2015-02-22 -# (C) Copyright 2015-2021 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2015-2023 Johns Hopkins University (JHU), All Rights Reserved. 
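Editor's note: the registration script above builds an ECM-to-PSM base frame from the three PSM motion directions observed while the ECM translates along its own axes. Because those measured directions are only approximately orthogonal, the script packs them into a rotation, converts to a quaternion, renormalizes and converts back. A standalone sketch of that step, using made-up direction vectors rather than real measurements:

# Orthonormalization through a quaternion round-trip (example vectors, not real measurements)
import numpy
import PyKDL

x = PyKDL.Vector(1.0, 0.02, -0.01)  # PSM motion direction seen for ECM x translation (assumed)
y = PyKDL.Vector(-0.02, 1.0, 0.03)  # ... for ECM y translation (assumed)
z = PyKDL.Vector(0.01, -0.03, 1.0)  # ... for ECM z translation (assumed)

q = PyKDL.Rotation(x, y, z).GetQuaternion()    # rotation built with x, y, z as columns
q = numpy.array(q) / numpy.linalg.norm(q)      # renormalize to a unit quaternion
r = PyKDL.Rotation.Quaternion(q[0], q[1], q[2], q[3])  # proper rotation matrix
print(r)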
# --- begin cisst license - do not edit --- @@ -15,44 +15,40 @@ # Start a single arm using # > rosrun dvrk_robot dvrk_console_json -j +# Run test script: +# > rosrun dvrk_python dvrk_psm_effort_test.py -a -# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py: -# > rosrun dvrk_python dvrk_arm_test.py - +import argparse +import crtk import dvrk import math -import sys -import rospy import numpy -import argparse +import sys if sys.version_info.major < 3: input = raw_input -# print with node id -def print_id(message): - print('%s -> %s' % (rospy.get_caller_id(), message)) - # example of application using arm.py class example_application: - - # configuration - def configure(self, robot_name, expected_interval): - print_id('configuring dvrk_psm_test for %s' % robot_name) + def __init__(self, ral, arm_name, expected_interval): + print('configuring dvrk_psm_effort_test for {}'.format(arm_name)) + self.ral = ral self.expected_interval = expected_interval - self.arm = dvrk.psm(arm_name = robot_name, + self.arm = dvrk.psm(ral = ral, + arm_name = arm_name, expected_interval = expected_interval) + self.arm.check_connections() # homing example def home(self): - print_id('starting enable') + print('starting enable') if not self.arm.enable(10): sys.exit('failed to enable within 10 seconds') - print_id('starting home') + print('starting home') if not self.arm.home(10): sys.exit('failed to home within 10 seconds') # get current joints just to set size - print_id('move to starting position') + print('move to starting position') goal = numpy.copy(self.arm.setpoint_jp()) # go to zero position, make sure 3rd joint is past cannula goal.fill(0) @@ -72,90 +68,92 @@ def prepare_cartesian(self): # effort jaw control example def jaw_effort(self): - print_id('starting jaw effort') + print('starting jaw effort') # get current joint torques just to set size effort_joint = numpy.copy(self.arm.measured_jf()) print(effort_joint) effort_joint.fill(0.0) - print_id('the jaws will open using a constant effort, the arm will go limp') - input(" Press Enter to continue...") + print('the jaws will open using a constant effort, the arm will go limp') + input('press Enter to continue...') self.arm.servo_jf(effort_joint) - self.arm.jaw.servo_jf(numpy.array(0.015)) + self.arm.jaw.servo_jf(numpy.array([0.015])) - print_id('the jaws will close using a constant effort, you can place a small object between the jaws now') - input(" Press Enter to continue...") - self.arm.jaw.servo_jf(numpy.array(-0.02)) + print('the jaws will close using a constant effort, you can place a small object between the jaws now') + input('press Enter to continue...') + self.arm.jaw.servo_jf(numpy.array([-0.02])) - print_id('the jaws will be released') - input(" Press Enter to continue...") - self.arm.jaw.servo_jf(numpy.array(0.0)) + print('the jaws will be released') + input('press Enter to continue') + self.arm.jaw.servo_jf(numpy.array([0.0])) # effort joint control example def joint_effort(self): - print_id('starting joint effort') + print('starting joint effort') # get current joint torques just to set size effort_joint = numpy.copy(self.arm.measured_jf()) effort_joint.fill(0.0) - print_id('the jaws and arm will go limp') - input(" Press Enter to continue...") + print('the jaws and arm will go limp') + input('press Enter to continue...') self.arm.servo_jf(effort_joint) - self.arm.jaw.servo_jf(numpy.array(0.0)) + self.arm.jaw.servo_jf(numpy.array([0.0])) - print_id('hold the arm from the top (near white clutch button) 
and keep an hand on the Enter key, the arm will push on the first joint') - input(" Press Enter to continue...") + print('hold the arm from the top (near white clutch button) and keep a hand on the Enter key, the arm will push on the first joint') + input('press Enter to continue...') effort_joint[0] = -0.5 self.arm.servo_jf(effort_joint) - print_id('arm will now push in opposite direction') - input(" Press Enter to continue...") + print('arm will now push in opposite direction') + input('press Enter to continue...') effort_joint[0] = 0.5 self.arm.servo_jf(effort_joint) - print_id('arm will now apply sine wave forces on first two joints') - input(" Press Enter to continue...") + print('arm will now apply sine wave forces on first two joints') + input('press Enter to continue...') duration = 10 # seconds samples = duration / self.expected_interval + + sleep_rate = self.ral.create_rate(1.0 / self.expected_interval) # create a new goal starting with current position for i in range(int(samples)): effort_joint[0] = 0.5 * (1.0 - math.cos(i * 10.0 * math.radians(360.0) / samples)) effort_joint[1] = 0.5 * (1.0 - math.cos(i * 10.0 * math.radians(360.0) / samples)) - rospy.sleep(self.expected_interval) self.arm.servo_jf(effort_joint) + sleep_rate.sleep() - print_id('arm will now go limp') - input(" Press Enter to continue...") + print('arm will now go limp') + input('press Enter to continue...') effort_joint.fill(0.0) self.arm.servo_jf(effort_joint) # wrench and jaw effort control example def wrench_jaw_effort(self): - print_id('starting wrench jaw effort') + print('starting wrench jaw effort') # get current joint torques just to set size effort_joint = numpy.copy(self.arm.measured_jf()) effort_joint.fill(0.0) - print_id('the jaws will open and arm will go limp') - input(" Press Enter to continue...") + print('the jaws will open and arm will go limp') + input('press Enter to continue...') self.arm.servo_jf(effort_joint) - self.arm.jaw.servo_jf(numpy.array(0.015)) + self.arm.jaw.servo_jf(numpy.array([0.015])) - print_id('the jaws will close using a constant effort, you can place a small object between the jaws now') - input(" Press Enter to continue...") - self.arm.jaw.servo_jf(numpy.array(-0.02)) + print('the jaws will close using a constant effort, you can place a small object between the jaws now') + input('press Enter to continue...') + self.arm.jaw.servo_jf(numpy.array([-0.02])) - print_id('hold the arm close to the tool tip and keep an hand on the Enter key, the arm will push in Z direction') - input(" Press Enter to continue...") + print('hold the arm close to the tool tip and keep a hand on the Enter key, the arm will push in Z direction') + input('press Enter to continue...') self.arm.body_set_cf_orientation_absolute(True) self.arm.body.servo_cf(numpy.array([0.0, 0.0, 2.0, 0.0, 0.0, 0.0])) - print_id('the jaws will be released') - input(" Press Enter to continue...") - self.arm.jaw.servo_jf(numpy.array(0.0)) + print('the jaws will be released') + input('press Enter to continue...') + self.arm.jaw.servo_jf(numpy.array([0.0])) - print_id('arm will now go limp') - input(" Press Enter to continue...") + print('arm will now go limp') + input('press Enter to continue...') self.arm.body.servo_cf(numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])) # main method @@ -165,11 +163,9 @@ def run(self): self.joint_effort() self.wrench_jaw_effort() + if __name__ == '__main__': - # ros init node so we can use default ros arguments (e.g. 
__ns:= for namespace) - rospy.init_node('dvrk_arm_test') - # strip ros arguments - argv = rospy.myargv(argv=sys.argv) + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name # parse arguments parser = argparse.ArgumentParser() @@ -178,8 +174,8 @@ def run(self): help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') parser.add_argument('-i', '--interval', type=float, default=0.01, help = 'expected interval in seconds between messages sent by the device') - args = parser.parse_args(argv[1:]) # skip argv[0], script name + args = parser.parse_args(argv) - application = example_application() - application.configure(args.arm, args.interval) - application.run() + ral = crtk.ral('dvrk_psm_effort_test') + application = example_application(ral, args.arm, args.interval) + ral.spin_and_execute(application.run) diff --git a/dvrk_python/scripts/dvrk_psm_test.py b/dvrk_python/scripts/dvrk_psm_test.py index 69336a0f..876b6a5c 100755 --- a/dvrk_python/scripts/dvrk_psm_test.py +++ b/dvrk_python/scripts/dvrk_psm_test.py @@ -3,7 +3,7 @@ # Author: Anton Deguet # Date: 2015-02-22 -# (C) Copyright 2015-2020 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2015-2023 Johns Hopkins University (JHU), All Rights Reserved. # --- begin cisst license - do not edit --- @@ -15,45 +15,45 @@ # Start a single arm using # > rosrun dvrk_robot dvrk_console_json -j +# Run test script: +# > rosrun dvrk_python dvrk_psm_test.py -a -# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py: -# > rosrun dvrk_python dvrk_arm_test.py - +import argparse +import crtk import dvrk import math -import sys -import rospy import numpy import PyKDL -import argparse +import sys if sys.version_info.major < 3: input = raw_input -# print with node id -def print_id(message): - print('%s -> %s' % (rospy.get_caller_id(), message)) # example of application using arm.py class example_application: + def __init__(self, ral, arm_name, expected_interval): + print('configuring dvrk_psm_test for {}'.format(arm_name)) - # configuration - def configure(self, robot_name, expected_interval): - print_id('configuring dvrk_psm_test for %s' % robot_name) + self.ral = ral self.expected_interval = expected_interval - self.arm = dvrk.psm(arm_name = robot_name, + self.arm = dvrk.psm(ral = ral, + arm_name = arm_name, expected_interval = expected_interval) # homing example def home(self): - print_id('starting enable') + self.ral.check_connections() + + print('starting enable') if not self.arm.enable(10): sys.exit('failed to enable within 10 seconds') - print_id('starting home') + print('starting home') if not self.arm.home(10): sys.exit('failed to home within 10 seconds') + # get current joints just to set size - print_id('move to starting position') + print('move to starting position') goal = numpy.copy(self.arm.setpoint_jp()) # go to zero position, make sure 3rd joint is past cannula goal.fill(0) @@ -62,47 +62,49 @@ def home(self): # utility to position tool/camera deep enough before cartesian examples def prepare_cartesian(self): - # make sure the camera is past the cannula and tool vertical + # make sure the tip is past the cannula and tool vertical goal = numpy.copy(self.arm.setpoint_jp()) - if ((self.arm.name() == 'PSM1') or (self.arm.name() == 'PSM2') or (self.arm.name() == 'PSM3') or (self.arm.name() == 'ECM')): + if ((self.arm.name().endswith('PSM1')) or (self.arm.name().endswith('PSM2')) + or (self.arm.name().endswith('PSM3'))): + 
print('preparing for cartesian motion') # set in position joint mode goal[0] = 0.0 goal[1] = 0.0 goal[2] = 0.12 + goal[3] = 0.0 self.arm.move_jp(goal).wait() - # goal jaw control example def run_jaw_move(self): - print_id('starting jaw move') + print('starting jaw move') # try to open and close with the cartesian part of the arm in different modes - print_id('close and open without other move command') + print('close and open without other move command') input(" Press Enter to continue...") - print_id('closing (1)') + print('closing (1)') self.arm.jaw.close().wait() - print_id('opening (2)') + print('opening (2)') self.arm.jaw.open().wait() - print_id('closing (3)') + print('closing (3)') self.arm.jaw.close().wait() - print_id('opening (4)') + print('opening (4)') self.arm.jaw.open().wait() # try to open and close with a joint goal - print_id('close and open with joint move command') + print('close and open with joint move command') input(" Press Enter to continue...") - print_id('closing and moving up (1)') + print('closing and moving up (1)') self.arm.jaw.close().wait(is_busy = True) self.arm.insert_jp(0.1).wait() - print_id('opening and moving down (2)') + print('opening and moving down (2)') self.arm.jaw.open().wait(is_busy = True) self.arm.insert_jp(0.15).wait() - print_id('closing and moving up (3)') + print('closing and moving up (3)') self.arm.jaw.close().wait(is_busy = True) self.arm.insert_jp(0.1).wait() - print_id('opening and moving down (4)') + print('opening and moving down (4)') self.arm.jaw.open().wait(is_busy = True) self.arm.insert_jp(0.15).wait() - print_id('close and open with cartesian move command') + print('close and open with cartesian move command') input(" Press Enter to continue...") # try to open and close with a cartesian goal @@ -121,42 +123,44 @@ def run_jaw_move(self): # first motion goal.p[0] = initial_cartesian_position.p[0] - amplitude goal.p[1] = initial_cartesian_position.p[1] - print_id('closing and moving right (1)') + print('closing and moving right (1)') self.arm.move_cp(goal).wait(is_busy = True) self.arm.jaw.close().wait() # second motion goal.p[0] = initial_cartesian_position.p[0] + amplitude goal.p[1] = initial_cartesian_position.p[1] - print_id('opening and moving left (1)') + print('opening and moving left (1)') self.arm.move_cp(goal).wait(is_busy = True) self.arm.jaw.open().wait() # back to starting point goal.p[0] = initial_cartesian_position.p[0] goal.p[1] = initial_cartesian_position.p[1] - print_id('moving back (3)') + print('moving back (3)') self.arm.move_cp(goal).wait() # goal jaw control example def run_jaw_servo(self): - print_id('starting jaw servo') + print('starting jaw servo') # try to open and close directly, needs interpolation - print_id('close and open without other servo command') + print('close and open without other servo command') input(" Press Enter to continue...") start_angle = math.radians(50.0) self.arm.jaw.open(angle = start_angle).wait() - # assume we start at 30 the move +/- 30 + # assume we start at 50, and then move +/- 30 amplitude = math.radians(30.0) duration = 5 # seconds samples = int(duration / self.expected_interval) + + sleep_rate = self.ral.create_rate(1.0 / self.expected_interval) # create a new goal starting with current position for i in range(samples * 4): - goal = start_angle + amplitude * (math.cos(i * math.radians(360.0) / samples) - 1.0) - self.arm.jaw.servo_jp(numpy.array(goal)) - rospy.sleep(self.expected_interval) - + sine = math.sin(2 * math.pi * float(i) / samples) + goal = start_angle + 
amplitude * sine + self.arm.jaw.servo_jp(numpy.array([goal])) + sleep_rate.sleep() # main method def run(self): @@ -167,10 +171,8 @@ if __name__ == '__main__': - # ros init node so we can use default ros arguments (e.g. __ns:= for namespace) - rospy.init_node('dvrk_psm_test') # strip ros arguments - argv = rospy.myargv(argv=sys.argv) + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name # parse arguments parser = argparse.ArgumentParser() @@ -179,8 +181,8 @@ def run(self): help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') parser.add_argument('-i', '--interval', type=float, default=0.01, help = 'expected interval in seconds between messages sent by the device') - args = parser.parse_args(argv[1:]) # skip argv[0], script name + args = parser.parse_args(argv) - application = example_application() - application.configure(args.arm, args.interval) - application.run() + ral = crtk.ral('dvrk_psm_test') + application = example_application(ral, args.arm, args.interval) + ral.spin_and_execute(application.run) diff --git a/dvrk_python/scripts/dvrk_reset_teleoperation.py b/dvrk_python/scripts/dvrk_reset_teleoperation.py new file mode 100755 index 00000000..de675d71 --- /dev/null +++ b/dvrk_python/scripts/dvrk_reset_teleoperation.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python + +# Author: Anton Deguet +# Date: 2023-09-12 + +# (C) Copyright 2023 Johns Hopkins University (JHU), All Rights Reserved. + +# --- begin cisst license - do not edit --- + +# This software is provided "as is" under an open source license, with +# no warranty. The complete license can be found in license.txt and +# http://www.cisst.org/cisst/license.txt. + +# --- end cisst license --- + +# Start a console with at least one PSM teleoperation component +# > rosrun dvrk_robot dvrk_console_json -j + +# To reset the arms used in teleoperation, run: +# > rosrun dvrk_python dvrk_reset_teleoperation -m -p + +""" +Reposition MTMs and PSMs for teleoperation. You can specify one +or more MTMs and PSMs. Examples: +> dvrk_reset_teleoperation -m MTMR -p PSM1 +> dvrk_reset_teleoperation -m MTMR MTML -p PSM1 PSM2 PSM3 +> dvrk_reset_teleoperation -m MTMR -p PSM1 -m MTML -p PSM2 PSM3 +""" + +import sys +import argparse +import crtk +import dvrk +import numpy +import PyKDL + +# example of application using arm.py +class reset_teleoperation: + + # configuration + def __init__(self, ral, mtms, psms, expected_interval): + print('dvrk_reset_teleoperation for MTM(s): {} and PSM(s): {}'.format(mtms, psms)) + self.ral = ral + self.expected_interval = expected_interval + self.mtms = [] + for mtm in mtms: + self.mtms.append(dvrk.mtm(ral = ral, + arm_name = mtm, + expected_interval = expected_interval)) + self.psms = [] + for psm in psms: + self.psms.append(dvrk.psm(ral = ral, + arm_name = psm, + expected_interval = expected_interval)) + self.console = dvrk.console(ral = ral) + + # run + def run(self): + # check all connections at once + self.ral.check_connections() + + # disable teleoperation + self.console.teleop_stop() + + # send move to all the arms + move_handles = [] + for mtm in self.mtms: + move_handles.append(mtm.move_jp(numpy.zeros(7))) + for psm in self.psms: + psm_goal_jp = numpy.copy(psm.setpoint_jp()) + psm_goal_jp[3] = 0.0 + move_handles.append(psm.move_jp(psm_goal_jp)) + + # wait for all completed + for handle in move_handles: + handle.wait() + + # restart teleop + self.console.teleop_start() + print('done') + + +if __name__ == '__main__': + # extract ros arguments (e.g. 
__ns:= for namespace) + argv = crtk.ral.parse_argv(sys.argv[1:]) # skip argv[0], script name + + # parse arguments + parser = argparse.ArgumentParser(description = __doc__, + formatter_class = argparse.RawTextHelpFormatter) + parser.add_argument('-m', '--mtms', type = str, required = True, + action = 'extend', nargs = '+', + choices = ['MTML', 'MTMR'], + help = 'MTM arm name that needs to be reset') + parser.add_argument('-p', '--psms', type = str, + action = 'extend', nargs = '+', + choices = ['PSM1', 'PSM2', 'PSM3'], + help = 'PSM arm name that needs to be reset') + parser.add_argument('-i', '--interval', type=float, default=0.01, + help = 'expected interval in seconds between messages sent by the device') + args = parser.parse_args(argv) + + ral = crtk.ral('dvrk_reset_teleoperation') + application = reset_teleoperation(ral, args.mtms, args.psms, args.interval) + ral.spin_and_execute(application.run) diff --git a/dvrk_python/scripts/psm_calibration_cv.py b/dvrk_python/scripts/psm_calibration_cv.py new file mode 100644 index 00000000..2aad06b5 --- /dev/null +++ b/dvrk_python/scripts/psm_calibration_cv.py @@ -0,0 +1,362 @@ +#!/usr/bin/env python + +# Authors: Anton Deguet, Brendan Burkhart +# Date: 2022-03-10 + +# (C) Copyright 2022 Johns Hopkins University (JHU), All Rights Reserved. + +# --- begin cisst license - do not edit --- + +# This software is provided "as is" under an open source license, with +# no warranty. The complete license can be found in license.txt and +# http://www.cisst.org/cisst/license.txt. + +# --- end cisst license --- + +# Used by dvrk_calibrate_potentiometer_psm_cv.py + + +import collections +import cv2 +import math +import numpy as np +import threading + + +# Represents a single tracked detection, with location history information +class TrackedObject: + max_strength = 10 + + def __init__(self, detection, history_length): + # Last known position + self.position = (detection[0], detection[1]) + self.size = detection[2] + + # 'signal-strength' i.e. 
how long and consistently this object has been detected for + self.strength = 1 + + # queue will 'remember' last history_length object locations + self.location_history = collections.deque(maxlen=history_length) + self.location_history.append(self.position) + + # Euclidean (L2) distance between this object and 'position' + def distance_to(self, position): + dx = self.position[0] - position[0] + dy = self.position[1] - position[1] + return math.sqrt(dx*dx + dy*dy) + + def is_strong(self): + return self.strength > (TrackedObject.max_strength - 2) + + # when a match is found, known position is updated and strength increased by 1, + # if no match is found, strength decays by 2 to prevent strength from oscillating + def update(self, detection): + if detection is not None: + self.position = (detection[0], detection[1]) + self.size = detection[2] + self.location_history.append(self.position) + + # cap strength so objects never remain too long + self.strength = min(self.strength+1, TrackedObject.max_strength) + else: + self.strength -= 2 + + +# Track all detected objects as they move over time +class ObjectTracking: + # max_distance is how far objects can move between frames + # and still be considered the same object + def __init__(self, max_distance, history_length): + self.objects = [] + self.max_distance = max_distance + self.primaryTarget = None + self.history_length = history_length + + # mark an object as the primary object to track + def set_primary_target(self, position): + nearbyObjects = [x for x in self.objects if x.distance_to(position) < 1.35*x.size] + self.primaryTarget = nearbyObjects[0] if len(nearbyObjects) == 1 else None + if self.primaryTarget is not None: + self.primaryTarget.location_history.clear() + + # Clear location history of all tracked objects + def clear_history(self): + for obj in self.objects: + obj.location_history.clear() + + # register detections from the current frame with tracked objects, + # removing, updating, and adding tracked objects as needed + def register(self, detections): + # Matrix of minimum distance between each detection and each tracked object + distances = np.array([ + np.array([obj.distance_to(d) for d in detections]) + for obj in self.objects + ]) + + current_object_count = len(self.objects) + + if len(self.objects) > 0: + if len(detections) > 0: + # Array of closest tracked object to each detection + closest = np.argmin(distances, axis=0) + + # associate detections with existing tracked objects or add as new objects + for i, detection in enumerate(detections): + if distances[closest[i], i] <= self.max_distance: + self.objects[closest[i]].update(detection) + else: + self.objects.append(TrackedObject(detection, history_length=self.history_length)) + + # Update tracked objects not associated with current detection + closest = np.argmin(distances, axis=1) + for j in range(current_object_count): + if distances[j, closest[j]] > self.max_distance: + self.objects[j].update(None) + else: + for obj in self.objects: + obj.update(None) + + # Remove stale tracked objects and check if primary target is stale + self.objects = [x for x in self.objects if x.strength > 0] + if not self.primaryTarget in self.objects and self.primaryTarget is not None: + print("Lost track of target! 
Please click on target to re-acquire") + self.primaryTarget = None + + # No existing tracked objects, add all detections as new objects + else: + for d in detections: + self.objects.append(TrackedObject(d, history_length=self.history_length)) + + +# tracks current offset of remote-center-of-motion of PSM +class RCMTracker: + def __init__(self, tracking_distance=15, window_title="CV Calibration"): + self.objects = ObjectTracking(tracking_distance, history_length=200) + self.window_title = window_title + + + def _mouse_callback(self, event, x, y, flags, params): + if event != cv2.EVENT_LBUTTONDOWN: + return + + self.objects.set_primary_target((x, y)) + + + def _create_window(self): + cv2.namedWindow(self.window_title) + cv2.setMouseCallback(self.window_title, lambda *args: self._mouse_callback(*args)) + + + def _init_video(self): + self.video_capture = cv2.VideoCapture(0) + self.video_capture.set(cv2.CAP_PROP_FPS, 30) + ok = False + if self.video_capture.isOpened(): + ok, _ = self.video_capture.read() + + if not ok: + print("\n\nFailed to read from camera.") + + return ok + + + def __del__(self): + self.video_capture.release() + cv2.destroyWindow(self.window_title) + + + # Process a frame - find, track, outline all potential targets + def _process(self, frame): + blurred = cv2.medianBlur(frame, 2*5 + 1) + hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV) + thresholded = cv2.inRange(hsv, (125, int(35*2.55), int(25*2.55)), (180, int(100*2.55), int(75*2.55))) + + contours, _ = cv2.findContours( + thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE + ) + + contours = [c for c in contours if cv2.contourArea(c) > 15] + + #cv2.drawContours(frame, contours, -1, (0, 255, 0), 3) + + moments = [cv2.moments(c) for c in contours] + radius = lambda area: math.sqrt(area/math.pi) + detections = [(int(M['m10']/M['m00']), int(M['m01']/M['m00']), radius(M['m00'])) for M in moments if M['m00'] > 0] + + self.objects.register(detections) + + for i in range(len(contours)): + contour_center = (detections[i][0], detections[i][1]) + + color = (0, 255, 0) + if self.objects.primaryTarget is not None and self.objects.primaryTarget.position == contour_center: + color = (255, 0, 255) + + cv2.drawContours(frame, [contours[i]], -1, color, 3) + + + def _is_good_ellipse_fit(self, target_bound, ellipse_bound): + rect_area = lambda rect: rect[1][0]*rect[1][1] + target_area = rect_area(target_bound) + if target_area <= 0.0: + return False + + # theoretical ratio between bounding rect area of observed target trajectory and + # bounding rect area of full circle + expected_area_ratio = (1.0 - math.cos(0.5*self.arm_swing_angle))*(2*math.sin(0.5*self.arm_swing_angle))/4.0 + allowed_margin = 2.0 + + # If the fitted ellipse is significantly larger than expected, there is likely outlier points + # causing a poor fit + if expected_area_ratio * rect_area(ellipse_bound) > allowed_margin * target_area: + return False + + intersection_type, intersection_contour = cv2.rotatedRectangleIntersection(ellipse_bound, target_bound) + overlap_ratio = 0.0 + if intersection_type != cv2.INTERSECT_NONE: + overlap_ratio = cv2.contourArea(intersection_contour)/target_area + + # Bounding rects should have nearly 100% overlap, but poor fit can cause ellipse to point in the + # wrong direction, etc. leading to smaller overlap. 
So this indicates bad fit + if overlap_ratio < 0.65: + return False + + return True + + + # Fits ellipse to target's trajectory to estimate the current offset of the target from the RCM + # Returns + # - RCM offset as vector in units of pixels + # - Bounding rect of the fitted ellipse + # - Bounding rect of target's trajectory + # If a good fit cannot be found, RCM offset will be None + def _fit_ellipse(self, tracked_object): + location_history = np.array(tracked_object.location_history) + bounding_rect = cv2.minAreaRect(location_history) + ellipse_bound = cv2.fitEllipse(location_history) + + # Sanity check quality of ellipse fitting + if not self._is_good_ellipse_fit(bounding_rect, ellipse_bound): + return None, ellipse_bound, bounding_rect + + ellipse_center, _, _ = ellipse_bound + ellipse_center = np.array(ellipse_center) + location_history_center = bounding_rect[0] + rcm_offset = np.array(ellipse_center) - location_history_center + + return rcm_offset, ellipse_bound, bounding_rect + + + def set_motion_range(self, motion_range_angle): + self.arm_swing_angle = motion_range_angle + + + def clear_history(self): + self.objects.clear_history() + + + # In background, run object tracking and display video + def start(self, enter_handler, quit_handler): + self.should_stop = False + self._enter_handler = enter_handler + self._quit_handler = quit_handler + self._should_run_point_acquisition = False + self._should_run_rcm_tracking = False + + self._create_window() + ok = self._init_video() + if not ok: + return False + + def run_camera(): + ok = True + while ok and not self.should_stop: + ok, frame = self.video_capture.read() + if not ok: + print("\n\nFailed to read from camera") + self._quit_handler() + + self._process(frame) + + if self._should_run_point_acquisition: + self._run_point_acquisition(frame) + if self._should_run_rcm_tracking: + self._run_rcm_tracking(frame) + + cv2.imshow(self.window_title, frame) + key = cv2.waitKey(20) + key = key & 0xFF # Upper bits are modifiers (control, alt, etc.) 
+ escape = 27 + if key == ord("q") or key == escape: + self._quit_handler() + elif key == ord("d") or key == ord("\n") or key == ord("\r"): + self._enter_handler() + + self.background_task = threading.Thread(target=run_camera) + self.background_task.start() + return True + + + def stop(self): + self.should_stop = True + + + def _run_point_acquisition(self, frame): + target = self.objects.primaryTarget + if target is not None and target.is_strong(): + cv2.circle(frame, target.position, radius=3, color=(0, 0, 255), thickness=cv2.FILLED) + # once at least 5 data points have been collected since user selected target, + # output average location of target + if len(target.location_history) > 5: + mean = np.mean(target.location_history, axis=0) + self._acquired_point = np.int32(mean) + + # Get location of target + def acquire_point(self): + self.objects.clear_history() + self._acquired_point = None + self._should_run_point_acquisition = True + + while not self.should_stop: + if self._acquired_point is not None: + return True, self._acquired_point + + return False, None + + + def _run_rcm_tracking(self, frame): + target = self.objects.primaryTarget + # OpenCV's fit_ellipse requires at least five points + if target is not None and target.is_strong() and len(target.location_history) > 5: + # fit ellipse to target's path and get measured RCM offset + rcm_offset, ellipse, rect = self._fit_ellipse(target) + + # draw fitted ellipse + cv2.ellipse(frame, ellipse, color=(255, 0, 0), thickness=2) + # draw bounding rectangle of ellipse + cv2.drawContours(frame, [np.int32(cv2.boxPoints(ellipse))], 0, color=(0, 255, 0), thickness=2) + # draw bounding rectangle around target's trajectory + cv2.drawContours(frame, [np.int32(cv2.boxPoints(rect))], 0, color=(0, 0, 255), thickness=2) + # draw points in target's trajectory + cv2.polylines(frame, [np.int32(list(target.location_history))], False, color=(255, 255, 0), thickness=2) + + ellipse_center = (int(ellipse[0][0]), int(ellipse[0][1])) + rect_center = (int(rect[0][0]), int(rect[0][1])) + + cv2.circle(frame, ellipse_center, radius=0, color=(0, 255, 0), thickness=3) + cv2.circle(frame, rect_center, radius=0, color=(0, 0, 255), thickness=3) + + # once enough data points have been collected, output measured RCM offset + if rcm_offset is not None: + if len(target.location_history) > 150: + self._rcm_output_callback(rcm_offset) + + + # Starts RCM tracking and sets callback to be used when good RCM offset measurement is made + def rcm_tracking(self, output_callback): + self._rcm_output_callback = output_callback + self._should_run_rcm_tracking = True + + def stop_rcm_tracking(self): + self._should_run_rcm_tracking = False + diff --git a/dvrk_python/scripts/validate_psm_calibration.py b/dvrk_python/scripts/validate_psm_calibration.py new file mode 100755 index 00000000..5639d3d3 --- /dev/null +++ b/dvrk_python/scripts/validate_psm_calibration.py @@ -0,0 +1,320 @@ +#!/usr/bin/env python + +import rospy +import argparse +from geometry_msgs.msg import PoseArray, Pose, Point, Quaternion +from std_msgs.msg import Bool +import numpy as np +import math +import os +import PyKDL +import sys +import time +import dvrk + +import cisst_msgs.srv + +import xml.etree.ElementTree as ET + +# To use this script, run the sawNDITracker ROS tool with an arbitrary tool definition, and enable +# stray marker tracking. Also run the console for the relevant arm in calibration mode. 
Place +# a passive tracker sphere onto the end of a PSM tool - you may need to open the jaws somewhat to +# hold it in place. Ensure the PSM's working range is within the tracker's volume, and no +# other detections are picked up. + +class NDITracker: + def init(self): + self.topic = "/NDI/measured_cp_array" + self.subscriber = rospy.Subscriber(self.topic, PoseArray, self._marker_callback) + self.is_tracking_strays = rospy.Subscriber("/NDI/tracking/stray_markers", Bool, self._is_tracking_strays) + self._last_position = None + self.valid = False + + def _is_tracking_strays(self, bool_msg): + if not bool_msg.data: + print("Not tracking stray markers!") + + def _marker_callback(self, pose_array_msg): + marker_count = len(pose_array_msg.poses) + self.valid = marker_count == 1 + if not self.valid: + return + + self._last_position = pose_array_msg.poses[0].position + + def get_position(self): + return self.valid, self._last_position + + def stop(self): + self.is_tracking_strays.unregister() + self.subscriber.unregister() + + +class CalibrationValidationApplication: + def configure(self, robot_name, config_file, expected_interval): + # check that the config file is good + if not os.path.exists(config_file): + print('Config file "{:s}" not found'.format(config_file)) + return False + + self.tree = ET.parse(config_file) + root = self.tree.getroot() + + xpath_search_results = root.findall("./Robot") + if len(xpath_search_results) != 1: + print('Can\'t find "Robot" in config file "{:s}"'.format(config_file)) + return False + + xmlRobot = xpath_search_results[0] + # Verify robot name + if xmlRobot.get("Name") != robot_name: + print( + 'Found robot "{:s}" instead of "{:s}", are you using the right config file?'.format( + xmlRobot.get("Name"), robot_name + ) + ) + return False + + serial_number = xmlRobot.get("SN") + print( + 'Successfully found robot "{:s}", serial number {:s} in XML file'.format( + robot_name, serial_number + ) + ) + self.expected_interval = expected_interval + self.arm = dvrk.psm(arm_name=robot_name, expected_interval=expected_interval) + + self.cartesian_insertion_minimum = 0.055 + + return True + + def setup(self): + if not self.arm.enable(10): + print("Failed to enable within 10 seconds") + return False + + if not self.arm.home(10): + print("Failed to home within 10 seconds") + return False + + return True + + # instrument needs to be inserted past cannula to use Cartesian commands, + # this will move instrument if necessary so Cartesian commands can be used + def enter_cartesian_space(self): + pose = np.copy(self.arm.measured_jp()) + if pose[2] >= self.cartesian_insertion_minimum: + return True + + pose[2] = self.cartesian_insertion_minimum + self.arm.move_jp(pose).wait() + return True + + # return arm to upright position + def center_arm(self): + pose = np.copy(self.arm.measured_jp()) + pose.fill(0.0) + pose[2] = self.cartesian_insertion_minimum + self.arm.move_jp(pose).wait() + return True + + # Generate series of arm poses within safe range of motion + # rom and max_depth bound a cone-like region with its tip at the RCM + # within which the poses are sampled + # Generates up to slices^3 poses total + def registration_poses(self, slices=5, rom=math.pi/4, max_depth=0.018): + query_cp_name = "{}/local/query_cp".format(self.arm.namespace()) + local_query_cp = rospy.ServiceProxy(query_cp_name, cisst_msgs.srv.QueryForwardKinematics) + + # Scale to keep point density equal as depth varies + scale_rom = lambda depth: math.atan((max_depth/depth)*math.tan(rom)) + + def 
merge_coordinates(alpha, betas, depth): + alphas = np.repeat(alpha, slices) + depths = np.repeat(depth, slices) + return np.column_stack([alphas, betas, depths]) + + js_points = [] + depths = np.linspace(max_depth, self.cartesian_insertion_minimum, slices) + for i, depth in enumerate(depths): + parity = 1 if i % 2 == 0 else -1 + theta = scale_rom(depth)*parity + alphas = np.linspace(-theta, theta, slices) + # Alternate direction so robot follows shortest path + for i, alpha in enumerate(alphas): + parity = 1 if i % 2 == 0 else -1 + betas = np.linspace(-parity*theta, parity*theta, slices) + js_points.extend(merge_coordinates(alpha, betas, depth)) + + # We generated square grid, crop to circle so that overall angle + # stays within specified range of motion + js_points = [p for p in js_points if (p[0]**2 + p[1]**2) <= rom**2] + + cs_points = [] + for point in js_points: + # query forward kinematics to get equivalent Cartesian point + kinematics_request = cisst_msgs.srv.QueryForwardKinematicsRequest() + kinematics_request.jp.position = [point[0], point[1], point[2], 0.0, 0.0, 0.0, 0.0, 0.0] + response = local_query_cp(kinematics_request) + point = response.cp.pose.position + cs_points.append(np.array([point.x, point.y, point.z])) + + goal_orientation = PyKDL.Rotation() + goal_orientation[1,1] = -1.0 + goal_orientation[2,2] = -1.0 + + points = [PyKDL.Vector(p[0], p[1], p[2]) for p in cs_points] + poses = [PyKDL.Frame(goal_orientation, p) for p in points] + + return poses + + # move arm to each goal pose, and measure both robot and camera relative positions + def collect_data(self, poses, tracker_sample_size=10): + ok = self.enter_cartesian_space() + if not ok: + return False, None, None + + robot_points = [] + tracker_points = [] + + print("Collecting data: 0/{}".format(len(poses)), end="\r") + + # Move arm to variety of positions and record image & world coordinates + for i, pose in enumerate(poses): + if not self.ok: + self.center_arm() + break + + self.arm.move_cp(pose).wait() + rospy.sleep(0.5) # Make sure arm is settled + + point = self.arm.measured_cp().p + point = np.array([point[0], point[1], point[2]]) + robot_points.append(point) + + tracker_sample = [] + + for j in range(tracker_sample_size): + rospy.sleep(0.05) + if rospy.is_shutdown(): + return False, None, None + + ok = False + while not ok and not rospy.is_shutdown(): + ok, point = self.tracker.get_position() + + tracker_sample.append(np.array([point.x, point.y, point.z])) + + tracker_points.append(np.mean(tracker_sample, axis=0)) + + print( + "Collecting data: {}/{}".format(i+1, len(poses)), + end="\r", + ) + + print("\n") + + self.center_arm() + return True, robot_points, tracker_points + + # Compute rigid registration of robot_points to tracker_points, + # assuming correspondence robot_points[i] <-> tracker_points[i] + # Returns status, RMS error, rotation, translation + # Uses Kabsch method + def compute_registration(self, robot_points, tracker_points): + robot_points = np.array(robot_points, dtype=np.float32) + tracker_points = np.array(tracker_points, dtype=np.float32) + + robot_points_centroid = np.mean(robot_points, axis=0) + tracker_points_centroid = np.mean(tracker_points, axis=0) + + # Align centroids to remove translation + A = robot_points - robot_points_centroid + B = tracker_points - tracker_points_centroid + covariance = np.matmul(np.transpose(A), B) + u, s, vh = np.linalg.svd(covariance) + d = math.copysign(1, np.linalg.det(np.matmul(u, vh))) + C = np.diag([1, 1, d]) + R = np.matmul(u, np.matmul(C, vh)) + if 
np.linalg.det(R) < 0: + print("Reflection found instead!") + + T = robot_points_centroid - np.matmul(R, tracker_points_centroid) + projected_tracker_points = np.matmul(tracker_points, np.transpose(R)) + T + pointwise_difference = robot_points - projected_tracker_points + pointwise_error = [np.linalg.norm(d) for d in pointwise_difference] + + # Compute error metrics, in millimeters + rmse = 1000.0*np.sqrt(np.mean([error**2 for error in pointwise_error])) + maxe = 1000.0*np.max(pointwise_error) + stde = 1000.0*np.std(pointwise_error) + + return self.ok, (rmse, maxe, stde), R, T + + def run(self, trials): + try: + self.ok = True + + self.tracker = NDITracker() + self.tracker.init() + + errors = [] + + times = [] + + for i in range(trials): + start = time.time() + self.ok = self.setup() + if not self.ok: + return + + poses = self.registration_poses(5, math.radians(50), 0.200) + self.ok, robot_points, tracker_points = self.collect_data(poses) + if not self.ok: + return + + self.ok, error, _, _ = self.compute_registration(robot_points, tracker_points) + if not self.ok: + return + + errors.append(error) + print("Trial {}/{} RMS error: {} {} {} millimeters".format(i+1, trials, error[0], error[1], error[2])) + end = time.time() + duration = end - start + times.append(duration) + if i+1 < trials: + avg = np.mean(times) + remaining = avg*(trials - (i+1)) + print("Estimated time remaining: {} seconds".format(int(remaining))) + + print("\nAll errors:") + for error in errors: + print("{},{},{}".format(error[0], error[1], error[2])) + + finally: + self.arm.unregister() + + +if __name__ == '__main__': + # ros init node so we can use default ros arguments (e.g. __ns:= for namespace) + rospy.init_node('dvrk_validate_psm_calibration', anonymous=True) + # strip ros arguments + argv = rospy.myargv(argv=sys.argv) + + # parse arguments + parser = argparse.ArgumentParser() + parser.add_argument('-a', '--arm', type=str, required=True, + choices=['PSM1', 'PSM2', 'PSM3'], + help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') + parser.add_argument('-t', '--trials', type=int, default=1, + help = 'how many full trials to run, disabling arm between trials') + parser.add_argument('-i', '--interval', type=float, default=0.02, + help = 'expected interval in seconds between messages sent by the device') + parser.add_argument('-c', '--config', type=str, required=True, + help = 'arm IO config file, i.e. something like sawRobotIO1394-xwz-12345.xml') + args = parser.parse_args(argv[1:]) # skip argv[0], script name + + application = CalibrationValidationApplication() + application.configure(args.arm, args.config, args.interval) + application.run(args.trials) + diff --git a/dvrk_python/src/dvrk/arm.py b/dvrk_python/src/dvrk/arm.py index 06258d09..6f5fc5cc 100644 --- a/dvrk_python/src/dvrk/arm.py +++ b/dvrk_python/src/dvrk/arm.py @@ -1,7 +1,7 @@ # Author(s): Anton Deguet # Created on: 2016-05 -# (C) Copyright 2016-2020 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2016-2023 Johns Hopkins University (JHU), All Rights Reserved. 
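# Illustrative sketch: compute_registration() in the validation script above is the
# standard Kabsch/SVD rigid fit. This is a minimal, self-contained sanity check on
# synthetic data (NumPy only); none of the names below come from the dvrk packages.
import numpy as np

def kabsch(P, Q):
    """Least-squares rigid fit: returns R, t with R @ P[i] + t ~= Q[i]."""
    Pc, Qc = P.mean(axis=0), Q.mean(axis=0)
    H = (P - Pc).T @ (Q - Qc)                   # cross-covariance of centered points
    U, _, Vt = np.linalg.svd(H)
    d = np.sign(np.linalg.det(Vt.T @ U.T))      # guard against reflections
    R = Vt.T @ np.diag([1.0, 1.0, d]) @ U.T
    t = Qc - R @ Pc
    return R, t

# synthetic "robot" vs. "tracker" points related by a known rotation/translation
rng = np.random.default_rng(0)
P = rng.uniform(-0.05, 0.05, size=(25, 3))
angle = np.deg2rad(30.0)
R_true = np.array([[np.cos(angle), -np.sin(angle), 0.0],
                   [np.sin(angle),  np.cos(angle), 0.0],
                   [0.0,            0.0,           1.0]])
t_true = np.array([0.10, -0.02, 0.30])
Q = P @ R_true.T + t_true
R, t = kabsch(P, Q)
print('max residual [m]:', np.linalg.norm(P @ R.T + t - Q, axis=1).max())  # ~0 (machine precision)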
# --- begin cisst license - do not edit --- @@ -23,52 +23,40 @@ """ # sphinx-apidoc -F -A "Yijun Hu" -o doc src -import threading -import math import crtk -import rospy -import PyKDL import std_msgs.msg class arm(object): - """Simple arm API wrapping around ROS messages - """ + """Simple arm API wrapping around ROS messages""" # class to contain spatial/body cf methods class __MeasuredServoCf: - def __init__(self, ros_namespace, expected_interval): - self.__crtk_utils = crtk.utils(self, ros_namespace, expected_interval) + def __init__(self, ral, expected_interval): + self.__crtk_utils = crtk.utils(self, ral, expected_interval) self.__crtk_utils.add_measured_cf() self.__crtk_utils.add_servo_cf() self.__crtk_utils.add_jacobian() # local kinematics class __Local: - def __init__(self, ros_namespace, expected_interval): - self.__crtk_utils = crtk.utils(self, ros_namespace, expected_interval) + def __init__(self, ral, expected_interval): + self.__crtk_utils = crtk.utils(self, ral, expected_interval) self.__crtk_utils.add_measured_cp() self.__crtk_utils.add_setpoint_cp() + self.__crtk_utils.add_forward_kinematics() # initialize the arm - def __init__(self, arm_name, ros_namespace = '', expected_interval = 0.01): - # base class constructor in separate method so it can be called in derived classes - self.__init_arm(arm_name, ros_namespace, expected_interval) - - - def __init_arm(self, arm_name, ros_namespace, expected_interval): - """Constructor. This initializes a few data members.It - requires a arm name, this will be used to find the ROS + def __init__(self, ral, arm_name, expected_interval = 0.01): + """Requires a arm name, this will be used to find the ROS topics for the arm being controlled. For example if the user wants `PSM1`, the ROS topics will be from the namespace `PSM1`""" - # data members, event based - self.__arm_name = arm_name - self.__ros_namespace = ros_namespace - self.__full_ros_namespace = ros_namespace + arm_name + self.__name = arm_name + self.__ral = ral.create_child(arm_name) # crtk features - self.__crtk_utils = crtk.utils(self, self.__full_ros_namespace, expected_interval) + self.__crtk_utils = crtk.utils(self, self.__ral, expected_interval) # add crtk features that we need and are supported by the dVRK self.__crtk_utils.add_operating_state() @@ -77,6 +65,8 @@ def __init_arm(self, arm_name, ros_namespace, expected_interval): self.__crtk_utils.add_measured_js() self.__crtk_utils.add_measured_cp() self.__crtk_utils.add_measured_cv() + self.__crtk_utils.add_hold() + self.__crtk_utils.add_free() self.__crtk_utils.add_servo_jp() self.__crtk_utils.add_servo_jr() self.__crtk_utils.add_servo_cp() @@ -84,50 +74,37 @@ def __init_arm(self, arm_name, ros_namespace, expected_interval): self.__crtk_utils.add_move_jp() self.__crtk_utils.add_move_jr() self.__crtk_utils.add_move_cp() + self.__crtk_utils.add_forward_kinematics() + self.__crtk_utils.add_inverse_kinematics() - self.spatial = self.__MeasuredServoCf(self.__full_ros_namespace + '/spatial', expected_interval) - self.body = self.__MeasuredServoCf(self.__full_ros_namespace + '/body', expected_interval) - self.local = self.__Local(self.__full_ros_namespace + '/local', expected_interval) - - self.__sub_list = [] - self.__pub_list = [] + self.spatial = self.__MeasuredServoCf(self.__ral.create_child('spatial'), expected_interval) + self.body = self.__MeasuredServoCf(self.__ral.create_child('body'), expected_interval) + self.local = self.__Local(self.__ral.create_child('local'), expected_interval) # publishers - frame = PyKDL.Frame() 
- self.__body_set_cf_orientation_absolute_pub = rospy.Publisher(self.__full_ros_namespace - + '/body/set_cf_orientation_absolute', - std_msgs.msg.Bool, latch = True, queue_size = 1) - self.__use_gravity_compensation_pub = rospy.Publisher(self.__full_ros_namespace - + '/use_gravity_compensation', - std_msgs.msg.Bool, latch = True, queue_size = 1) - self.__trajectory_j_set_ratio_pub = rospy.Publisher(self.__full_ros_namespace - + '/trajectory_j/set_ratio', - std_msgs.msg.Float64, latch = True, queue_size = 1) - - self.__pub_list = [self.__body_set_cf_orientation_absolute_pub, - self.__use_gravity_compensation_pub, - self.__trajectory_j_set_ratio_pub] - # subscribers - self.__trajectory_j_ratio_sub = rospy.Subscriber(self.__full_ros_namespace - + '/trajectory_j/ratio', - std_msgs.msg.Float64, self.__trajectory_j_ratio_cb) - - self.__sub_list = [self.__trajectory_j_ratio_sub] - - # create node - if not rospy.get_node_uri(): - rospy.init_node('arm_api', anonymous = True, log_level = rospy.WARN) - else: - rospy.logdebug(rospy.get_caller_id() + ' -> ROS already initialized') - + self.__body_set_cf_orientation_absolute_pub = self.__ral.publisher( + '/body/set_cf_orientation_absolute', std_msgs.msg.Bool, + latch = True, queue_size = 1) + self.__use_gravity_compensation_pub = self.__ral.publisher( + '/use_gravity_compensation', std_msgs.msg.Bool, + latch = True, queue_size = 1) + self.__trajectory_j_set_ratio_pub = self.__ral.publisher( + '/trajectory_j/set_ratio', std_msgs.msg.Float64, + latch = True, queue_size = 1) + + self.__trajectory_j_ratio = 1.0 + self.__trajectory_j_ratio_sub = self.__ral.subscriber( + '/trajectory_j/ratio', std_msgs.msg.Float64, + self.__trajectory_j_ratio_cb) def name(self): - return self.__arm_name - - - def namespace(self): - return self.__full_ros_namespace + return self.__name + def ral(self): + return self.__ral + + def check_connections(self, timeout = 5.0): + self.__ral.check_connections(timeout) def body_set_cf_orientation_absolute(self, absolute): """Apply body wrench using body orientation (relative/False) or reference frame (absolute/True)""" @@ -135,37 +112,25 @@ def body_set_cf_orientation_absolute(self, absolute): m.data = absolute self.__body_set_cf_orientation_absolute_pub.publish(m) - def use_gravity_compensation(self, gravity_compensation): - "Turn on/off gravity compensation in cartesian effort mode" + """Turn on/off gravity compensation in cartesian effort mode""" g = std_msgs.msg.Bool() g.data = gravity_compensation self.__use_gravity_compensation_pub.publish(g) - def trajectory_j_set_ratio(self, ratio): """Set ratio applied to max velocities and accelerations used for - joint trajectory generation. Value should be in range ]0, - 1] + joint trajectory generation. 
Value should be in range ]0, 1] """ r = std_msgs.msg.Float64() r.data = ratio self.__trajectory_j_set_ratio_pub.publish(r) + def trajectory_j_get_ratio(self): + return self.__trajectory_j_ratio + def __trajectory_j_ratio_cb(self, msg): self.__trajectory_j_ratio = msg.data def trajectory_j_ratio(self): return self.__trajectory_j_ratio - - - def unregister(self, verbose = False): - for sub in self.__sub_list: - sub.unregister() - if verbose: - print('Unregistered {} subs for {}'.format(self.__sub_list.__len__(), self.__arm_name)) - - for pub in self.__pub_list: - pub.unregister() - if verbose: - print('Unregistered {} pubs for {}'.format(self.__pub_list.__len__(), self.__arm_name)) diff --git a/dvrk_python/src/dvrk/console.py b/dvrk_python/src/dvrk/console.py index a8a915d7..d75be535 100644 --- a/dvrk_python/src/dvrk/console.py +++ b/dvrk_python/src/dvrk/console.py @@ -1,7 +1,7 @@ # Author(s): Anton Deguet # Created on: 2016-05 -# (C) Copyright 2016-2021 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2016-2023 Johns Hopkins University (JHU), All Rights Reserved. # --- begin cisst license - do not edit --- @@ -11,58 +11,55 @@ # --- end cisst license --- -import rospy - -from std_msgs.msg import Bool, Float64, Empty +import std_msgs.msg class console(object): """Simple dVRK console API wrapping around ROS messages """ # initialize the console - def __init__(self, console_namespace = ''): + def __init__(self, ral, console_namespace = 'console'): # base class constructor in separate method so it can be called in derived classes - self.__init_console(console_namespace) - + self.__init_console(ral, console_namespace) - def __init_console(self, console_namespace = ''): - """Constructor. This initializes a few data members. It - requires a arm name, this will be used to find the ROS topics - for the console being controlled. 
The default is - 'console' and it would be necessary to change it only if + def __init_console(self, ral, console_name): + """Default is console name is 'console' and it would be necessary to change it only if you have multiple dVRK consoles""" # data members, event based - self.__console_namespace = console_namespace + 'console' + self.__ral = ral.create_child(console_name) self.__teleop_scale = 0.0 # publishers - self.__power_off_pub = rospy.Publisher(self.__console_namespace - + '/power_off', - Empty, latch = True, queue_size = 1) - self.__power_on_pub = rospy.Publisher(self.__console_namespace - + '/power_on', - Empty, latch = True, queue_size = 1) - self.__home_pub = rospy.Publisher(self.__console_namespace - + '/home', - Empty, latch = True, queue_size = 1) - self.__teleop_enable_pub = rospy.Publisher(self.__console_namespace - + '/teleop/enable', - Bool, latch = True, queue_size = 1) - self.__teleop_set_scale_pub = rospy.Publisher(self.__console_namespace - + '/teleop/set_scale', - Float64, latch = True, queue_size = 1) + self.__power_off_pub = self.__ral.publisher('/power_off', + std_msgs.msg.Empty, + latch = False, queue_size = 1) + self.__power_on_pub = self.__ral.publisher('/power_on', + std_msgs.msg.Empty, + latch = False, queue_size = 1) + self.__home_pub = self.__ral.publisher('/home', + std_msgs.msg.Empty, + latch = False, queue_size = 1) + self.__teleop_enable_pub = self.__ral.publisher('/teleop/enable', + std_msgs.msg.Bool, + latch = False, queue_size = 1) + self.__teleop_set_scale_pub = self.__ral.publisher('/teleop/set_scale', + std_msgs.msg.Float64, + latch = False, queue_size = 1) + self.__set_volume_pub = self.__ral.publisher('/set_volume', + std_msgs.msg.Float64, + latch = False, queue_size = 1) + self.__beep_pub = self.__ral.publisher('/beep', + std_msgs.msg.Float64MultiArray, + latch = False, queue_size = 1) + self.__string_to_speech_pub = self.__ral.publisher('/string_to_speech', + std_msgs.msg.String, + latch = False, queue_size = 1) # subscribers - rospy.Subscriber(self.__console_namespace - + '/teleop/scale', - Float64, self.__teleop_scale_cb) - - # create node - if not rospy.get_node_uri(): - rospy.init_node('console_api', anonymous = True, log_level = rospy.WARN) - else: - rospy.logdebug(rospy.get_caller_id() + ' -> ROS already initialized') - + self.__teleop_scale_sub = self.__ral.subscriber('/teleop/scale', + std_msgs.msg.Float64, + self.__teleop_scale_cb, + latch = True) def __teleop_scale_cb(self, data): """Callback for teleop scale. 
@@ -70,30 +67,50 @@ def __teleop_scale_cb(self, data): :param data: the latest scale requested for the dVRK console""" self.__teleop_scale = data.data - def power_off(self): - self.__power_off_pub.publish() - + msg = std_msgs.msg.Empty() + self.__power_off_pub.publish(msg) def power_on(self): - self.__power_on_pub.publish() - + msg = std_msgs.msg.Empty() + self.__power_on_pub.publish(msg) def home(self): - self.__home_pub.publish() - + msg = std_msgs.msg.Empty() + self.__home_pub.publish(msg) def teleop_start(self): - self.__teleop_enable_pub.publish(True) - + msg = std_msgs.msg.Bool() + msg.data = True + self.__teleop_enable_pub.publish(msg) def teleop_stop(self): - self.__teleop_enable_pub.publish(False) - + msg = std_msgs.msg.Bool() + msg.data = False + self.__teleop_enable_pub.publish(msg) def teleop_set_scale(self, scale): - self.__teleop_set_scale_pub.publish(scale) - + msg = std_msgs.msg.Float64() + msg.data = scale + self.__teleop_set_scale_pub.publish(msg) def teleop_get_scale(self): return self.__teleop_scale + + def set_volume(self, volume): + msg = std_msgs.msg.Float64() + msg.data = volume + self.__set_volume_pub.publish(msg) + + def beep(self, duration, frequency, volume = 1.0): + msg = std_msgs.msg.Float64MultiArray() + msg.data = [duration, frequency, volume] + msg.layout.data_offset = 0 + msg.layout.dim = [] + msg.layout.dim.append(std_msgs.msg.MultiArrayDimension('values', 3, 1)) + self.__beep_pub.publish(msg) + + def string_to_speech(self, string): + msg = std_msgs.msg.String() + msg.data = string + self.__string_to_speech_pub.publish(msg) diff --git a/dvrk_python/src/dvrk/ecm.py b/dvrk_python/src/dvrk/ecm.py index 9a645318..da610ac4 100644 --- a/dvrk_python/src/dvrk/ecm.py +++ b/dvrk_python/src/dvrk/ecm.py @@ -1,7 +1,7 @@ # Author(s): Anton Deguet # Created on: 2016-05 -# (C) Copyright 2016-2020 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2016-2023 Johns Hopkins University (JHU), All Rights Reserved. # --- begin cisst license - do not edit --- @@ -16,13 +16,12 @@ import numpy class ecm(arm): - """Simple robot API wrapping around ROS messages - """ - # initialize the robot + """Simple robot API wrapping around ROS messages""" + # initialize the robot - def __init__(self, arm_name, ros_namespace = '', expected_interval = 0.01): + def __init__(self, ral, arm_name, expected_interval = 0.01): # first call base class constructor - self._arm__init_arm(arm_name, ros_namespace, expected_interval) + super().__init__(ral, arm_name, expected_interval) def insert_jp(self, depth): "insert the tool, by moving it to an absolute depth" diff --git a/dvrk_python/src/dvrk/mtm.py b/dvrk_python/src/dvrk/mtm.py index b04c20d5..46e588b0 100644 --- a/dvrk_python/src/dvrk/mtm.py +++ b/dvrk_python/src/dvrk/mtm.py @@ -1,7 +1,7 @@ # Author(s): Anton Deguet # Created on: 2016-05 -# (C) Copyright 2016-2020 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2016-2023 Johns Hopkins University (JHU), All Rights Reserved. 
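# Illustrative sketch of the new calling pattern introduced by the crtk.ral changes
# in arm.py/console.py above: user scripts now pass a ral instance instead of a ROS
# namespace string. The crtk.ral('node_name') constructor is assumed from the updated
# dVRK examples; every method call below mirrors the code shown in this diff.
import crtk
import dvrk

ral = crtk.ral('dvrk_console_sketch')   # ROS abstraction layer (replaces rospy.init_node)
console = dvrk.console(ral)             # defaults to the 'console' namespace
psm = dvrk.psm(ral, 'PSM1')             # arm classes take the ral first, then the arm name
psm.check_connections()                 # wait until publishers/subscribers are connected

console.power_on()
console.home()
console.teleop_set_scale(0.5)
console.beep(duration=0.2, frequency=440.0)   # new helper, Float64MultiArray [duration, frequency, volume]
psm.trajectory_j_set_ratio(0.5)               # scale down max velocities/accelerations for move_jp/move_cp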
# --- begin cisst license - do not edit --- @@ -14,6 +14,7 @@ from dvrk.arm import * import geometry_msgs.msg +from crtk_msgs.msg import CartesianImpedance class mtm(arm): """Simple robot API wrapping around ROS messages @@ -21,41 +22,45 @@ class mtm(arm): # class to contain gripper methods class __Gripper: - def __init__(self, ros_namespace, expected_interval): - self.__crtk_utils = crtk.utils(self, ros_namespace, expected_interval) + def __init__(self, ral, expected_interval): + self.__crtk_utils = crtk.utils(self, ral, expected_interval) self.__crtk_utils.add_measured_js() # initialize the robot - def __init__(self, arm_name, ros_namespace = '', expected_interval = 0.01): + def __init__(self, ral, arm_name, expected_interval = 0.01): # first call base class constructor - self._arm__init_arm(arm_name, ros_namespace, expected_interval) - self.gripper = self.__Gripper(self._arm__full_ros_namespace + '/gripper', expected_interval) + super().__init__(ral, arm_name, expected_interval) + self.gripper = self.__Gripper(self.ral().create_child('/gripper'), expected_interval) # publishers - self.__lock_orientation_pub = rospy.Publisher(self._arm__full_ros_namespace - + '/lock_orientation', - geometry_msgs.msg.Quaternion, latch = True, queue_size = 1) - self.__unlock_orientation_pub = rospy.Publisher(self._arm__full_ros_namespace - + '/unlock_orientation', - std_msgs.msg.Empty, latch = True, queue_size = 1) + self.__lock_orientation_publisher = self.ral().publisher('lock_orientation', + geometry_msgs.msg.Quaternion, + latch = True, queue_size = 1) + + self.__unlock_orientation_publisher = self.ral().publisher('unlock_orientation', + std_msgs.msg.Empty, + latch = True, queue_size = 1) + + self.__set_gains_publisher = self.ral().publisher('servo_ci', + CartesianImpedance, + queue_size = 10) - self._arm__pub_list.extend([self.__lock_orientation_pub, - self.__unlock_orientation_pub]) def lock_orientation_as_is(self): "Lock orientation based on current orientation" current = self.setpoint_cp() self.lock_orientation(current.M) - def lock_orientation(self, orientation): """Lock orientation, expects a PyKDL rotation matrix (PyKDL.Rotation)""" q = geometry_msgs.msg.Quaternion() q.x, q.y, q.z, q.w = orientation.GetQuaternion() - self.__lock_orientation_pub.publish(q); - + self.__lock_orientation_publisher.publish(q) def unlock_orientation(self): "Unlock orientation" e = std_msgs.msg.Empty() - self.__unlock_orientation_pub.publish(e); + self.__unlock_orientation_publisher.publish(e) + + def servo_ci(self, gains): + self.__set_gains_publisher.publish(gains) diff --git a/dvrk_python/src/dvrk/psm.py b/dvrk_python/src/dvrk/psm.py index d2c0070d..90eaa837 100644 --- a/dvrk_python/src/dvrk/psm.py +++ b/dvrk_python/src/dvrk/psm.py @@ -1,7 +1,7 @@ # Author(s): Anton Deguet # Created on: 2016-05 -# (C) Copyright 2016-2020 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2016-2023 Johns Hopkins University (JHU), All Rights Reserved. 
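# Illustrative sketch of the MTM helpers updated in mtm.py above (gripper state and
# orientation lock). The crtk.ral constructor is assumed as in the previous sketch;
# the CartesianImpedance gains message used by servo_ci() is not shown because its
# fields belong to crtk_msgs and are not part of this diff.
import crtk
import dvrk

ral = crtk.ral('dvrk_mtm_sketch')
mtm = dvrk.mtm(ral, 'MTMR')
mtm.check_connections()

print('gripper angle [rad]:', mtm.gripper.measured_jp())  # __Gripper adds measured_js via crtk.utils
mtm.lock_orientation_as_is()    # hold the current wrist orientation
mtm.unlock_orientation()        # release it again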
# --- begin cisst license - do not edit --- @@ -13,6 +13,7 @@ from dvrk.arm import * +import math import numpy class psm(arm): @@ -21,8 +22,8 @@ class psm(arm): # class to contain jaw methods class __Jaw: - def __init__(self, ros_namespace, expected_interval, operating_state_instance): - self.__crtk_utils = crtk.utils(self, ros_namespace, expected_interval, operating_state_instance) + def __init__(self, ral, expected_interval, operating_state_instance): + self.__crtk_utils = crtk.utils(self, ral, expected_interval, operating_state_instance) self.__crtk_utils.add_measured_js() self.__crtk_utils.add_setpoint_js() self.__crtk_utils.add_servo_jp() @@ -31,26 +32,25 @@ def __init__(self, ros_namespace, expected_interval, operating_state_instance): def close(self): "Close the tool jaw" - return self.move_jp(numpy.array(math.radians(-20.0))) + return self.move_jp(numpy.array([math.radians(-20.0)])) def open(self, angle = math.radians(60.0)): "Close the tool jaw" - return self.move_jp(numpy.array(angle)) + return self.move_jp(numpy.array([angle])) # initialize the robot - def __init__(self, arm_name, ros_namespace = '', expected_interval = 0.01): + def __init__(self, ral, arm_name, expected_interval = 0.01): # first call base class constructor - self._arm__init_arm(arm_name, ros_namespace, expected_interval) - self.jaw = self.__Jaw(self._arm__full_ros_namespace + '/jaw', expected_interval, + super().__init__(ral, arm_name, expected_interval) + jaw_ral = self.ral().create_child('/jaw') + self.jaw = self.__Jaw(jaw_ral, expected_interval, operating_state_instance = self) # publishers - self.__set_tool_present_pub = rospy.Publisher(self._arm__full_ros_namespace - + '/set_tool_present', - std_msgs.msg.Bool, latch = True, queue_size = 1) - - self._arm__pub_list.extend([self.__set_tool_present_pub]) + self.__set_tool_present_publisher = self.ral().publisher('/emulate_tool_present', + std_msgs.msg.Bool, + latch = True, queue_size = 1) def insert_jp(self, depth): "insert the tool, by moving it to an absolute depth" @@ -62,4 +62,4 @@ def set_tool_present(self, tool_present): "Set tool inserted. To be used only for custom tools that can't be detected automatically" tp = std_msgs.msg.Bool() tp.data = tool_present - self.__set_tool_present_pub.publish(tp) + self.__set_tool_present_publisher.publish(tp) diff --git a/dvrk_python/src/dvrk/suj.py b/dvrk_python/src/dvrk/suj.py index 9aaa81c4..d6f54773 100644 --- a/dvrk_python/src/dvrk/suj.py +++ b/dvrk_python/src/dvrk/suj.py @@ -1,7 +1,7 @@ # Author(s): Anton Deguet # Created on: 2016-05 -# (C) Copyright 2016-2021 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2016-2023 Johns Hopkins University (JHU), All Rights Reserved. # --- begin cisst license - do not edit --- @@ -11,44 +11,40 @@ # --- end cisst license --- -import rospy import crtk class suj(object): """Simple arm API wrapping around ROS messages """ - # local kinematics - class __Local: - def __init__(self, ros_namespace, expected_interval): - self.__crtk_utils = crtk.utils(self, ros_namespace, expected_interval) - self.__crtk_utils.add_measured_cp() + class __Arm: + + # local kinematics + class __Local: + def __init__(self, ral, expected_interval): + self.__crtk_utils = crtk.utils(self, ral, expected_interval) + self.__crtk_utils.add_measured_cp() - # initialize the arm - def __init__(self, arm_name, ros_namespace = 'SUJ/', expected_interval = 1.0): - """Constructor. 
This initializes a few data members.It - requires a arm name, this will be used to find the ROS - topics for the arm being controlled. For example if the - user wants `PSM1`, the ROS topics will be from the namespace - `SUJ/PSM1`""" - # data members, event based - self.__arm_name = arm_name - self.__ros_namespace = ros_namespace - self.__full_ros_namespace = self.__ros_namespace + self.__arm_name - - # crtk features - self.__crtk_utils = crtk.utils(self, self.__full_ros_namespace, expected_interval) - - # add crtk features that we need and are supported by the dVRK - self.__crtk_utils.add_operating_state() - self.__crtk_utils.add_measured_js() - self.__crtk_utils.add_measured_cp() - self.__crtk_utils.add_move_jp() - - self.local = self.__Local(self.__full_ros_namespace + '/local', expected_interval) - - # create node - if not rospy.get_node_uri(): - rospy.init_node('arm_suj_api_' + self.__arm_name, anonymous = True, log_level = rospy.WARN) - else: - rospy.logdebug(rospy.get_caller_id() + ' -> ROS already initialized') + def __init__(self, ral, expected_interval): + self.__ral = ral + self.__crtk_utils = crtk.utils(self, ral, expected_interval) + self.__crtk_utils.add_operating_state() + self.__crtk_utils.add_measured_js() + self.__crtk_utils.add_measured_cp() + self.__crtk_utils.add_move_jp() # for simulated SUJs only + self.local = self.__Local(ral.create_child('/local'), expected_interval) + + def ral(self): + return self.__ral + + # initialize the all SUJ arms + def __init__(self, ral, expected_interval = 1.0): + """Constructor. This initializes a few data members and creates + instances of classes for each SUJ arm.""" + self.__ral = ral.create_child('SUJ') + self.__crtk_utils = crtk.utils(self, ral, expected_interval) + for arm in ('ECM', 'PSM1', 'PSM2', 'PSM3'): + setattr(self, arm, self.__Arm(ral.create_child(arm), expected_interval)) + + def ral(self): + return self.__ral diff --git a/dvrk_python/src/dvrk/teleop_psm.py b/dvrk_python/src/dvrk/teleop_psm.py index db1cd7bd..5792b908 100644 --- a/dvrk_python/src/dvrk/teleop_psm.py +++ b/dvrk_python/src/dvrk/teleop_psm.py @@ -1,7 +1,7 @@ # Author(s): Anton Deguet # Created on: 2016-08 -# (C) Copyright 2016-2021 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2016-2023 Johns Hopkins University (JHU), All Rights Reserved. # --- begin cisst license - do not edit --- @@ -11,53 +11,29 @@ # --- end cisst license --- -import rospy - -from std_msgs.msg import Bool, Float64, Empty, String -from geometry_msgs.msg import Quaternion +import std_msgs.msg +import crtk_msgs.msg class teleop_psm(object): - """Simple dVRK teleop PSM API wrapping around ROS messages - """ - - # initialize the teleop - def __init__(self, teleop_name, ros_namespace = ''): - # base class constructor in separate method so it can be called in derived classes - self.__init_teleop_psm(teleop_name, ros_namespace) - + """Simple dVRK teleop PSM API wrapping around ROS messages""" - def __init_teleop_psm(self, teleop_name, ros_namespace = ''): - """Constructor. This initializes a few data members. 
It - requires a teleop name, this will be used to find the ROS topics + def __init__(self, ral, teleop_name): + """Requires a teleop name, this will be used to find the ROS topics for the console being controlled.""" # data members - self.__teleop_name = teleop_name - self.__ros_namespace = ros_namespace - self.__full_ros_namespace = self.__ros_namespace + self.__teleop_name + self.__ral = ral.create_child(teleop_name) self.__scale = 0.0 # publishers - self.__set_scale_pub = rospy.Publisher(self.__full_ros_namespace - + '/set_scale', - Float64, latch = True, queue_size = 1) - self.__set_registration_rotation_pub = rospy.Publisher(self.__full_ros_namespace - + '/set_registration_rotation', - Quaternion, latch = True, queue_size = 1) - self.__set_desired_state_pub = rospy.Publisher(self.__full_ros_namespace - + '/set_desired_state', - String, latch = True, queue_size = 1) + self.__set_scale_pub = self.__ral.publisher('/set_scale', + std_msgs.msg.Float64, + latch = False, queue_size = 1) + self.__set_state_command_pub = self.__ral.publisher('/state_command', + crtk_msgs.msg.StringStamped, + latch = False, queue_size = 1) # subscribers - rospy.Subscriber(self.__full_ros_namespace - + '/scale', - Float64, self.__scale_cb) - - # create node - if not rospy.get_node_uri(): - rospy.init_node('teleop_api', anonymous = True, log_level = rospy.WARN) - else: - rospy.logdebug(rospy.get_caller_id() + ' -> ROS already initialized') - + self.__scale_sub = self.__ral.subscriber('/scale', std_msgs.msg.Float64, self.__scale_cb) def __scale_cb(self, data): """Callback for teleop scale. @@ -66,19 +42,19 @@ def __scale_cb(self, data): self.__scale = data.data def set_scale(self, scale): - self.__set_scale_pub.publish(scale) + msg = std_msgs.msg.Float64() + msg.data = scale + self.__set_scale_pub.publish(msg) def get_scale(self): return self.__scale - def set_registration_rotation(self, rotation): - """Expect a PyKDL rotation matrix (PyKDL.Rotation)""" - q = Quaternion() - q.x, q.y, q.z, q.w = rotation.GetQuaternion() - self.__set_registration_rotation_pub.publish(q) - def enable(self): - self.__set_desired_state_pub.publish('ENABLED') + msg = crtk_msgs.msg.StringStamped() + msg.string = 'enable' + self.__set_state_command_pub.publish(msg) def disable(self): - self.__set_desired_state_pub.publish('DISABLED') + msg = crtk_msgs.msg.StringStamped() + msg.string = 'disable' + self.__set_state_command_pub.publish(msg) diff --git a/dvrk_robot/CMakeLists.txt b/dvrk_robot/CMakeLists.txt index b43cb448..5ecd8b1a 100644 --- a/dvrk_robot/CMakeLists.txt +++ b/dvrk_robot/CMakeLists.txt @@ -1,5 +1,5 @@ # -# (C) Copyright 2015-2021 Johns Hopkins University (JHU), All Rights Reserved. +# (C) Copyright 2015-2023 Johns Hopkins University (JHU), All Rights Reserved. 
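# Illustrative sketch of the updated psm.py jaw API (move_jp now receives a 1-element
# numpy array internally) and of teleop_psm.py, which now enables/disables through a
# crtk state command. The crtk.ral constructor and the teleop pair name 'MTMR_PSM1'
# are placeholders chosen for the example; adapt them to your console configuration.
import math
import crtk
import dvrk

ral = crtk.ral('dvrk_teleop_sketch')
psm = dvrk.psm(ral, 'PSM1')
psm.check_connections()

psm.jaw.open(angle=math.radians(30.0)).wait()   # wrapped internally as numpy.array([angle])
psm.jaw.close().wait()

teleop = dvrk.teleop_psm(ral, 'MTMR_PSM1')      # name depends on your console configuration
teleop.set_scale(0.3)
teleop.enable()                                 # publishes StringStamped 'enable' on state_command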
# # --- begin cisst license - do not edit --- # @@ -10,13 +10,12 @@ # --- end cisst license --- cmake_minimum_required (VERSION 3.1) +project (dvrk_robot VERSION 2.2.0) set (CMAKE_CXX_STANDARD 14) set (CMAKE_CXX_STANDARD_REQUIRED ON) set (CMAKE_CXX_EXTENSIONS OFF) -project (dvrk_robot) - ## find catkin macros and libraries ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) ## is used, also find other catkin packages @@ -44,7 +43,7 @@ set (REQUIRED_CISST_LIBRARIES cisstQt ) -find_package (cisst 1.1.0 REQUIRED ${REQUIRED_CISST_LIBRARIES}) +find_package (cisst 1.2.0 REQUIRED ${REQUIRED_CISST_LIBRARIES}) if (cisst_FOUND_AS_REQUIRED) @@ -61,9 +60,9 @@ if (cisst_FOUND_AS_REQUIRED) # sawRobotIO1394 has been compiled within cisst, we should find it automatically - find_package (sawRobotIO1394 2.1.0 REQUIRED) - find_package (sawControllers 2.0.0 REQUIRED) - find_package (sawIntuitiveResearchKit 2.1.0 REQUIRED) + find_package (sawRobotIO1394 2.2.0 REQUIRED) + find_package (sawControllers 2.1.0 REQUIRED) + find_package (sawIntuitiveResearchKit 2.2.0 REQUIRED) include_directories ( ${dvrk_robot_SOURCE_DIR}/include diff --git a/dvrk_robot/README.md b/dvrk_robot/README.md index 3bb2924b..5e6f96f3 100644 --- a/dvrk_robot/README.md +++ b/dvrk_robot/README.md @@ -35,4 +35,4 @@ We provide a few console configurations for simulated arms in `sawIntuitiveResea The best way to figure how to use the ROS topics is to look at the python ROS wrappers in `dvrk_python` or the Matlab ROS wrappers in -`dvrk_matlab`. The ROS topics are described in https://github.com/jhu-dvrk/sawIntuitiveResearchKit/wiki/API-2.x +`dvrk_matlab`. diff --git a/dvrk_robot/include/dvrk_utilities/dvrk_console.h b/dvrk_robot/include/dvrk_utilities/dvrk_console.h index e9a3ffe3..bbf288db 100644 --- a/dvrk_robot/include/dvrk_utilities/dvrk_console.h +++ b/dvrk_robot/include/dvrk_utilities/dvrk_console.h @@ -5,7 +5,7 @@ Author(s): Anton Deguet Created on: 2015-05-23 - (C) Copyright 2015-2020 Johns Hopkins University (JHU), All Rights Reserved. + (C) Copyright 2015-2023 Johns Hopkins University (JHU), All Rights Reserved. --- begin cisst license - do not edit --- @@ -19,12 +19,12 @@ no warranty. 
The complete license can be found in license.txt and #ifndef _dvrk_console_h #define _dvrk_console_h -#include +#include class mtsIntuitiveResearchKitConsole; namespace dvrk { - class console: public mts_ros_crtk_bridge + class console: public mts_ros_crtk_bridge_provided { CMN_DECLARE_SERVICES(CMN_NO_DYNAMIC_CREATION, CMN_LOG_ALLOW_DEFAULT); @@ -62,6 +62,8 @@ namespace dvrk { void add_topics_endoscope_focus(void); // IO timing void add_topics_io(void); + // add monitoring topics for all PIDs + void add_topics_pid(void); // low level IO for a given arm if requested by user void add_topics_arm_io(mtsROSBridge * _pub_bridge, const std::string & _arm_name, @@ -77,6 +79,7 @@ namespace dvrk { protected: mtsROSBridge * m_pub_bridge; + double m_publish_rate, m_tf_rate; mtsIntuitiveResearchKitConsole * m_console; }; } diff --git a/dvrk_robot/launch/gscam_decklink.launch b/dvrk_robot/launch/gscam_decklink.launch index e33dc051..06c186bc 100644 --- a/dvrk_robot/launch/gscam_decklink.launch +++ b/dvrk_robot/launch/gscam_decklink.launch @@ -1,28 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - - + diff --git a/dvrk_robot/launch/gscam_decklink_stereo.launch b/dvrk_robot/launch/gscam_decklink_stereo.launch index 2e0e21f0..06c186bc 100644 --- a/dvrk_robot/launch/gscam_decklink_stereo.launch +++ b/dvrk_robot/launch/gscam_decklink_stereo.launch @@ -1,23 +1,4 @@ - - - - - - - - - - - - - - - - - - - - + diff --git a/dvrk_robot/launch/gscam_hauppauge_live2.launch b/dvrk_robot/launch/gscam_hauppauge_live2.launch index e07606df..06c186bc 100644 --- a/dvrk_robot/launch/gscam_hauppauge_live2.launch +++ b/dvrk_robot/launch/gscam_hauppauge_live2.launch @@ -1,28 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - + - diff --git a/dvrk_robot/launch/gscam_mono.launch b/dvrk_robot/launch/gscam_mono.launch index 197e3a10..06c186bc 100644 --- a/dvrk_robot/launch/gscam_mono.launch +++ b/dvrk_robot/launch/gscam_mono.launch @@ -1,28 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - + - diff --git a/dvrk_robot/launch/gscam_stereo.launch b/dvrk_robot/launch/gscam_stereo.launch index 35b95e83..06c186bc 100644 --- a/dvrk_robot/launch/gscam_stereo.launch +++ b/dvrk_robot/launch/gscam_stereo.launch @@ -1,30 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - + diff --git a/dvrk_robot/launch/gscam_v4l_test.launch b/dvrk_robot/launch/gscam_v4l_test.launch new file mode 100644 index 00000000..06c186bc --- /dev/null +++ b/dvrk_robot/launch/gscam_v4l_test.launch @@ -0,0 +1,4 @@ + + + + diff --git a/dvrk_robot/launch/jhu_dVRK_video.launch b/dvrk_robot/launch/jhu_dVRK_video.launch index 3f2df406..06c186bc 100644 --- a/dvrk_robot/launch/jhu_dVRK_video.launch +++ b/dvrk_robot/launch/jhu_dVRK_video.launch @@ -1,46 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + diff --git a/dvrk_robot/launch/jhu_daVinci_full_system_video.launch b/dvrk_robot/launch/jhu_daVinci_full_system_video.launch index df5468d7..fecd8132 100644 --- a/dvrk_robot/launch/jhu_daVinci_full_system_video.launch +++ b/dvrk_robot/launch/jhu_daVinci_full_system_video.launch @@ -1,13 +1,13 @@ - + diff --git a/dvrk_robot/launch/jhu_daVinci_video.launch b/dvrk_robot/launch/jhu_daVinci_video.launch index b9a57f1c..06c186bc 100644 --- a/dvrk_robot/launch/jhu_daVinci_video.launch +++ b/dvrk_robot/launch/jhu_daVinci_video.launch @@ -1,42 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + diff --git a/dvrk_robot/package.xml b/dvrk_robot/package.xml index bdaa1c76..859d4d57 100644 --- 
a/dvrk_robot/package.xml +++ b/dvrk_robot/package.xml @@ -1,7 +1,7 @@ dvrk_robot - 2.1.0 + 2.2.0 The dvrk_robot package Anton Deguet diff --git a/dvrk_robot/scripts/dvrk_calibrate_potentiometer_psm.py b/dvrk_robot/scripts/dvrk_calibrate_potentiometer_psm.py index f150799c..2e22e633 100755 --- a/dvrk_robot/scripts/dvrk_calibrate_potentiometer_psm.py +++ b/dvrk_robot/scripts/dvrk_calibrate_potentiometer_psm.py @@ -1,234 +1,6 @@ #!/usr/bin/env python -# Author: Anton Deguet -# Date: 2015-02-22 - -# (C) Copyright 2015-2020 Johns Hopkins University (JHU), All Rights Reserved. - -# --- begin cisst license - do not edit --- - -# This software is provided "as is" under an open source license, with -# no warranty. The complete license can be found in license.txt and -# http://www.cisst.org/cisst/license.txt. - -# --- end cisst license --- - -# Start a single arm using -# > rosrun dvrk_robot dvrk_console_json -j - -# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py: -# > rosrun dvrk_python dvrk_arm_test.py - -import dvrk -import math import sys -import select -import tty -import termios -import rospy -import numpy -import argparse - -import os.path -import xml.etree.ElementTree as ET - -# for local_query_cp -import cisst_msgs.srv - -# for keyboard capture -def is_there_a_key_press(): - return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], []) - -# example of application using arm.py -class example_application: - - # configuration - def configure(self, robot_name, config_file, expected_interval): - self.expected_interval = expected_interval - self.config_file = config_file - # check that the config file is good - if not os.path.exists(self.config_file): - sys.exit("Config file \"{%s}\" not found".format(self.config_file)) - - # XML parsing, find current offset - self.tree = ET.parse(config_file) - root = self.tree.getroot() - - # find Robot in config file and make sure name matches - xpath_search_results = root.findall("./Robot") - if len(xpath_search_results) == 1: - xmlRobot = xpath_search_results[0] - else: - sys.exit("Can't find \"Robot\" in configuration file {:s}".format(self.config_file)) - - if xmlRobot.get("Name") == robot_name: - serial_number = xmlRobot.get("SN") - print("Successfully found robot \"{:s}\", serial number {:s} in XML file".format(robot_name, serial_number)) - robotFound = True - else: - sys.exit("Found robot \"{:s}\" while looking for \"{:s}\", make sure you're using the correct configuration file!".format(xmlRobot.get("Name"), robot_name)) - - # now find the offset for joint 2, we assume there's only one result - xpath_search_results = root.findall("./Robot/Actuator[@ActuatorID='2']/AnalogIn/VoltsToPosSI") - self.xmlPot = xpath_search_results[0] - print("Potentiometer offset for joint 2 is currently: {:s}".format(self.xmlPot.get("Offset"))) - - self.arm = dvrk.psm(arm_name = robot_name, - expected_interval = expected_interval) - - # homing example - def home(self): - print('Enabling...') - if not self.arm.enable(10): - sys.exit('failed to enable within 10 seconds') - print('Homing...') - if not self.arm.home(10): - sys.exit('failed to home within 10 seconds') - # get current joints just to set size - print('Moving to zero position...') - goal = numpy.copy(self.arm.setpoint_jp()) - goal.fill(0) - self.arm.move_jp(goal).wait() - self.arm.jaw.move_jp(numpy.array([0.0])).wait() - # identify depth for tool j5 using forward kinematics - local_query_cp = rospy.ServiceProxy(self.arm.namespace() + '/local/query_cp', 
cisst_msgs.srv.QueryForwardKinematics) - request = cisst_msgs.srv.QueryForwardKinematicsRequest() - request.jp.position = [0.0, 0.0, 0.0, 0.0] - response = local_query_cp(request) - self.q2 = response.cp.pose.position.z - print("Depth required to position O5 on RCM point: {0:4.2f}mm".format(self.q2 * 1000.0)) - - # find range - def find_range(self): - print('Finding range of motion for joint 0\nMove the arm manually (pressing the clutch) to find the maximum range of motion for the first joint (left to right motion).\n - press "d" when you\'re done\n - press "q" to abort\n') - self.min = math.radians( 180.0) - self.max = math.radians(-180.0) - done = False - increment = numpy.copy(self.arm.setpoint_jp()) - increment.fill(0) - - # termios settings - old_settings = termios.tcgetattr(sys.stdin) - try: - tty.setcbreak(sys.stdin.fileno()) - while not done: - # process key - if is_there_a_key_press(): - c = sys.stdin.read(1) - if c == 'd': - done = True - elif c == 'q': - sys.exit('... calibration aborted by user') - # get measured joint values - p = self.arm.measured_jp() - if p[0] > self.max: - self.max = p[0] - elif p[0] < self.min: - self.min = p[0] - # display current range - sys.stdout.write('\rRange[%02.2f, %02.2f]' % (math.degrees(self.min), math.degrees(self.max))) - sys.stdout.flush() - # sleep - rospy.sleep(self.expected_interval) - finally: - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings) - print('') - - # direct joint control example - def calibrate_third_joint(self): - print('\nAdjusting translation offset\nPress the keys "+" (or "=") and "-" or ("_") to adjust the depth until the axis 5 is mostly immobile (one can use a camera to look at the point)\n - press "d" when you\'re done\n - press "q" to abort\n') - # move to max position as starting point - initial_joint_position = numpy.copy(self.arm.setpoint_jp()) - goal = numpy.copy(self.arm.setpoint_jp()) - goal.fill(0) - goal[0] = self.max - goal[2] = self.q2 # to start close to expected RCM - goal[3] = math.radians(90.0) # so axis is facing user - - self.arm.move_jp(goal).wait() - - # parameters to move back and forth - cos_ratio = (self.max - self.min) / 2.0 - - # termios settings - old_settings = termios.tcgetattr(sys.stdin) - correction = 0.0 - try: - tty.setcbreak(sys.stdin.fileno()) - start = rospy.Time.now() - done = False - while not done: - # process key - if is_there_a_key_press(): - c = sys.stdin.read(1) - if c == 'd': - done = True - elif c == 'q': - sys.exit('... calibration aborted by user') - elif c == '-' or c == '_': - correction = correction - 0.0001 - elif c == '+' or c == '=': - correction = correction + 0.0001 - # move back and forth - dt = rospy.Time.now() - start - t = dt.to_sec() / 2.0 - goal[0] = self.max + cos_ratio * (math.cos(t) - 1.0) - goal[2] = self.q2 + correction - self.arm.servo_jp(goal) - # display current offset - sys.stdout.write('\rCorrection = %02.2f mm' % (correction * 1000.0)) - sys.stdout.flush() - # sleep - rospy.sleep(self.expected_interval) - finally: - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings) - print('') - - # now save the new offset - oldOffset = float(self.xmlPot.get("Offset")) / 1000.0 # convert from XML (mm) to m - newOffset = oldOffset - correction # add in meters - self.xmlPot.set("Offset", str(newOffset * 1000.0)) # convert from m to XML (mm) - self.tree.write(self.config_file + "-new") - print('Old offset: {:2.2f}mm\nNew offset: {:2.2f}mm\n'.format(oldOffset * 1000.0, newOffset * 1000.0)) - print('Results saved in {:s}-new. 
Restart your dVRK application with the new file and make sure you re-bias the potentiometer offsets! To be safe, power off and on the dVRK PSM controller.'.format(self.config_file)) - print('To copy the new file over the existing one: cp {:s}-new {:s}'.format(self.config_file, self.config_file)) - - - # main method - def run(self): - self.home() - self.find_range() - self.calibrate_third_joint() - -if __name__ == '__main__': - # ros init node so we can use default ros arguments (e.g. __ns:= for namespace) - rospy.init_node('dvrk_arm_test', anonymous=True) - # strip ros arguments - argv = rospy.myargv(argv=sys.argv) - - # parse arguments - parser = argparse.ArgumentParser() - parser.add_argument('-a', '--arm', type=str, required=True, - choices=['PSM1', 'PSM2', 'PSM3'], - help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') - parser.add_argument('-i', '--interval', type=float, default=0.01, - help = 'expected interval in seconds between messages sent by the device') - parser.add_argument('-c', '--config', type=str, required=True, - help = 'arm IO config file, i.e. something like sawRobotIO1394-xwz-12345.xml') - args = parser.parse_args(argv[1:]) # skip argv[0], script name - - print ('\nThis program can be used to improve the potentiometer offset for the third joint ' - 'of the PSM arm (translation stage). The goal is increase the absolute accuracy of the PSM.\n' - 'The main idea is to position a known point on the tool where the RCM should be. ' - 'If the calibration is correct, the point shouldn\'t move while the arm is rocking from left to right. ' - 'For this application we\'re going to use the axis of the first joint of the wrist, i.e. the first joint ' - 'at the end of the tool shaft. To perform this calibration you need to remove the canulla otherwise you won\'t see' - ' the RCM point. One simple way to track the motion is to use a camera and place the cursor where the axis is.\n\n' - 'You must first home your PSM and make sure a tool is engaged. 
' - 'Once this is done, there are two steps:\n' - ' -1- find a safe range of motion for the rocking movement\n' - ' -2- adjust the depth so that the first hinge on the tool wrist is as close as possible to the RCM.\n\n') +import os - application = example_application() - application.configure(args.arm, args.config, args.interval) - application.run() +print('This program has been moved to the dvrk_python package.\nYou should use: rosrun dvrk_python {}'.format(os.path.basename(sys.argv[0]))) diff --git a/dvrk_robot/scripts/dvrk_calibrate_potentiometer_psm_cv.py b/dvrk_robot/scripts/dvrk_calibrate_potentiometer_psm_cv.py new file mode 100755 index 00000000..2e22e633 --- /dev/null +++ b/dvrk_robot/scripts/dvrk_calibrate_potentiometer_psm_cv.py @@ -0,0 +1,6 @@ +#!/usr/bin/env python + +import sys +import os + +print('This program has been moved to the dvrk_python package.\nYou should use: rosrun dvrk_python {}'.format(os.path.basename(sys.argv[0]))) diff --git a/dvrk_robot/scripts/dvrk_calibrate_potentiometers.py b/dvrk_robot/scripts/dvrk_calibrate_potentiometers.py index a9a3bee0..2e22e633 100755 --- a/dvrk_robot/scripts/dvrk_calibrate_potentiometers.py +++ b/dvrk_robot/scripts/dvrk_calibrate_potentiometers.py @@ -1,339 +1,6 @@ #!/usr/bin/env python -# Authors: Nick Eusman, Anton Deguet -# Date: 2015-09-24 -# Copyright JHU 2015-2021 - -import time -import rospy -import math import sys -import csv -import datetime -import numpy -import argparse -import os.path - -from sensor_msgs.msg import JointState -import dvrk -import xml.etree.ElementTree as ET - - -if sys.version_info.major < 3: - input = raw_input - - -def slope(x, y): - a = [] - for i in range(len(x)): - a.append(x[i] * y[i]) - sum_a = sum(a) - final_a = (sum_a * len(x)) - final_b = (sum(x) * sum(y)) - - c_squared = [] - for i in x: - c_squared.append(i**2) - - c_sum = sum(c_squared) - final_c = (c_sum * len(x)) - final_d = (sum(x)**2) - - result = (final_a - final_b) / (final_c - final_d) - return result - -class potentiometer_calibration: - - def __init__(self, robot_name): - self._robot_name = robot_name - self._serial_number = "" - self._data_received = False # use pots to make sure the ROS topics are OK - self._last_potentiometers = [] - self._last_joints = [] - ros_namespace = self._robot_name - rospy.Subscriber(ros_namespace + '/io/analog_input_pos_si', JointState, self.pot_callback) - rospy.Subscriber(ros_namespace + '/io/joint_measured_js', JointState, self.joints_callback) - - def pot_callback(self, data): - self._last_potentiometers[:] = data.position - self._data_received = True - - def joints_callback(self, data): - self._data_received = True - self._last_joints[:] = data.position - - - def run(self, calibrate, filename): - nb_joint_positions = 20 # number of positions between limits - nb_samples_per_position = 500 # number of values collected at each position - total_samples = nb_joint_positions * nb_samples_per_position - samples_so_far = 0 - - sleep_time_after_motion = 0.5 # time after motion from position to position to allow potentiometers to stabilize - sleep_time_between_samples = 0.01 # time between two samples read (potentiometers) - - encoders = [] - potentiometers = [] - range_of_motion_joint = [] - - average_encoder = [] # all encoder values collected at a sample position used for averaging the values of that position - average_potentiometer = [] # all potentiometer values collected at a sample position used for averaging the values of that position - d2r = math.pi / 180.0 # degrees to radians - r2d = 180.0 / 
math.pi # radians to degrees - - slopes = [] - offsets = [] - average_offsets = [] - - # Looking in XML assuming following tree structure - # config > Robot> Actuator > AnalogIn > VoltsToPosSI > Scale = ____ or Offset = ____ - xmlVoltsToPosSI = {} - - # Make sure file exists - if not os.path.exists(filename): - sys.exit("Config file \"%s\" not found" % filename) - - tree = ET.parse(filename) - root = tree.getroot() - - # Find Robot in config file and make sure name matches - xpath_search_results = root.findall("./Robot") - if len(xpath_search_results) == 1: - xmlRobot = xpath_search_results[0] - else: - sys.exit("Can't find \"Robot\" in configuration file") - - if xmlRobot.get("Name") == self._robot_name: - self._serial_number = xmlRobot.get("SN") - print("Successfully found robot \"%s\", serial number %s in XML file" % (self._robot_name, self._serial_number)) - robotFound = True - else: - sys.exit("Found robot \"%s\" while looking for \"%s\", make sure you're using the correct configuration file!" % (xmlRobot.get("Name"), self._robot_name)) - - # Look for all actuators/VoltsToPosSI - xpath_search_results = root.findall("./Robot/Actuator") - for actuator in xpath_search_results: - actuatorId = int(actuator.get("ActuatorID")) - voltsToPosSI = actuator.find("./AnalogIn/VoltsToPosSI") - xmlVoltsToPosSI[actuatorId] = voltsToPosSI - - # set joint limits and number of axis based on arm type, using robot name - if ("").join(list(self._robot_name)[:-1]) == "PSM": #checks to see if the robot being tested is a PSM - arm_type = "PSM" - lower_joint_limits = [-60.0 * d2r, -30.0 * d2r, 0.005, -170.0 * d2r, -170.0 * d2r, -170.0 * d2r, -170.0 * d2r] - upper_joint_limits = [ 60.0 * d2r, 30.0 * d2r, 0.235, 170.0 * d2r, 170.0 * d2r, 170.0 * d2r, 170.0 * d2r] - nb_axis = 7 - elif self._robot_name == "MTML": - arm_type = "MTM" - lower_joint_limits = [-15.0 * d2r, -10.0 * d2r, -10.0 * d2r, -180.0 * d2r, -80.0 * d2r, -40.0 * d2r, -100.0 * d2r] - upper_joint_limits = [ 35.0 * d2r, 20.0 * d2r, 10.0 * d2r, 80.0 * d2r, 160.0 * d2r, 40.0 * d2r, 100.0 * d2r] - nb_axis = 7 - elif self._robot_name == "MTMR": - arm_type = "MTM" - lower_joint_limits = [-30.0 * d2r, -10.0 * d2r, -10.0 * d2r, -80.0 * d2r, -80.0 * d2r, -40.0 * d2r, -100.0 * d2r] - upper_joint_limits = [ 15.0 * d2r, 20.0 * d2r, 10.0 * d2r, 180.0 * d2r, 160.0 * d2r, 40.0 * d2r, 100.0 * d2r] - nb_axis = 7 - elif self._robot_name == "ECM": - arm_type = "ECM" - lower_joint_limits = [-60.0 * d2r, -40.0 * d2r, 0.005, -80.0 * d2r] - upper_joint_limits = [ 60.0 * d2r, 40.0 * d2r, 0.230, 80.0 * d2r] - nb_axis = 4 - - # Create the dVRK python ROS client - this_arm = dvrk.arm(self._robot_name) - - # resize all arrays - for axis in range(nb_axis): - encoders.append([]) - offsets.append([]) - potentiometers.append([]) - average_encoder.append([]) - average_offsets.append([]) - average_potentiometer.append([]) - range_of_motion_joint.append(math.fabs(upper_joint_limits[axis] - lower_joint_limits[axis])) - - # Check that everything is working - time.sleep(2.0) # to make sure some data has arrived - if not self._data_received: - print("It seems the console for %s is not started or is not publishing the IO topics" % self._robot_name) - print("Make sure you use \"rosrun dvrk_robot dvrk_console_json\" with the -i option") - sys.exit("Start the dvrk_console_json with the proper options first") - - print("The serial number found in the XML file is: %s" % self._serial_number) - print("Make sure the dvrk_console_json is using the same configuration file. 
Serial number can be found in GUI tab \"IO\".") - ok = input("Press `c` and [enter] to continue\n") - if ok != "c": - sys.exit("Quitting") - - ######## scale calibration - now = datetime.datetime.now() - now_string = now.strftime("%Y-%m-%d-%H:%M") - - if calibrate == "scales": - - print("Calibrating scales using encoders as reference") - - # write all values to csv file - csv_file_name = 'pot_calib_scales_' + self._robot_name + '-' + self._serial_number + '-' + now_string + '.csv' - print("Values will be saved in: %s" % csv_file_name) - f = open(csv_file_name, 'w') - writer = csv.writer(f) - header = [] - for axis in range(nb_axis): - header.append("potentiometer" + str(axis)) - for axis in range(nb_axis): - header.append("encoder" + str(axis)) - writer.writerow(header) - - # messages - input("To start with some initial values, you first need to \"home\" the robot. When homed, press [enter]\n") - - if arm_type == "PSM": - input("Since you are calibrating a PSM, make sure there is no tool inserted. Please remove tool or calibration plate if any and press [enter]\n") - if arm_type == "ECM": - input("Since you are calibrating an ECM, remove the endoscope and press [enter]\n") - input("The robot will make LARGE MOVEMENTS, please hit [enter] to continue once it is safe to proceed\n") - - for position in range(nb_joint_positions): - # create joint goal - joint_goal = [] - for axis in range(nb_axis): - joint_goal.append(lower_joint_limits[axis] + position * (range_of_motion_joint[axis] / nb_joint_positions)) - average_encoder[axis] = [] - average_potentiometer[axis] = [] - - # move and sleep - this_arm.move_jp(numpy.array(joint_goal)) - time.sleep(sleep_time_after_motion) - - # collect nb_samples_per_position at current position to compute average - for sample in range(nb_samples_per_position): - for axis in range(nb_axis): - average_potentiometer[axis].append(self._last_potentiometers[axis]) - average_encoder[axis].append(self._last_joints[axis]) - # log data - writer.writerow(self._last_potentiometers + self._last_joints) - time.sleep(sleep_time_between_samples) - samples_so_far = samples_so_far + 1 - sys.stdout.write('\rProgress %02.1f%%' % (float(samples_so_far) / float(total_samples) * 100.0)) - sys.stdout.flush() - - # compute averages - for axis in range(nb_axis): - potentiometers[axis].append(math.fsum(average_potentiometer[axis]) / nb_samples_per_position) - encoders[axis].append(math.fsum(average_encoder[axis]) / nb_samples_per_position) - - - # at the end, return to home position - if arm_type == "PSM" or arm_type == "MTM": - this_arm.move_jp(numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])) - elif arm_type == "ECM": - this_arm.move_jp(numpy.array([0.0, 0.0, 0.0, 0.0])) - - # close file - f.close() - - - ######## offset calibration - if calibrate == "offsets": - - print("Calibrating offsets") - - # write all values to csv file - csv_file_name = 'pot_calib_offsets_' + self._robot_name + '-' + self._serial_number + '-' + now_string + '.csv' - print("Values will be saved in: ", csv_file_name) - f = open(csv_file_name, 'w') - writer = csv.writer(f) - header = [] - for axis in range(nb_axis): - header.append("potentiometer" + str(axis)) - writer.writerow(header) - - # messages - print("Please home AND power off the robot first. Then hold/clamp your arm in zero position.") - if arm_type == "PSM": - print("For a PSM, you need to hold at least the last 4 joints in zero position. If you don't have a way to constrain the first 3 joints, you can still just calibrate the last 4. 
This program will ask you later if you want to save all PSM joint offsets"); - input("Press [enter] to continue\n") - nb_samples = 2 * nb_samples_per_position - for sample in range(nb_samples): - for axis in range(nb_axis): - average_offsets[axis].append(self._last_potentiometers[axis] * r2d) - writer.writerow(self._last_potentiometers) - time.sleep(sleep_time_between_samples) - sys.stdout.write('\rProgress %02.1f%%' % (float(sample) / float(nb_samples) * 100.0)) - sys.stdout.flush() - for axis in range(nb_axis): - offsets[axis] = (math.fsum(average_offsets[axis]) / (nb_samples) ) - - print("") - - - if calibrate == "scales": - print("index | old scale | new scale | correction") - for index in range(nb_axis): - # find existing values - oldScale = float(xmlVoltsToPosSI[index].get("Scale")) - # compute new values - correction = slope(encoders[index], potentiometers[index]) - newScale = oldScale / correction - - # display - print(" %d | % 04.6f | % 04.6f | % 04.6f" % (index, oldScale, newScale, correction)) - # replace values - xmlVoltsToPosSI[index].set("Scale", str(newScale)) - - if calibrate == "offsets": - newOffsets = [] - print("index | old offset | new offset | correction") - for index in range(nb_axis): - # find existing values - oldOffset = float(xmlVoltsToPosSI[index].get("Offset")) - # compute new values - newOffsets.append(oldOffset - offsets[index]) - - # display - print(" %d | % 04.6f | % 04.6f | % 04.6f " % (index, oldOffset, newOffsets[index], offsets[index])) - - if arm_type == "PSM": - all = input("Do you want to save all joint offsets or just the last 4, press 'a' followed by [enter] to save all\n"); - if all == "a": - print("This program will save ALL new PSM offsets") - for axis in range(nb_axis): - # replace values - xmlVoltsToPosSI[axis].set("Offset", str(newOffsets[axis])) - else: - print("This program will only save the last 4 PSM offsets") - for axis in range(3, nb_axis): - # replace values - xmlVoltsToPosSI[axis].set("Offset", str(newOffsets[axis])) - else: - for axis in range(nb_axis): - # replace values - xmlVoltsToPosSI[axis].set("Offset", str(newOffsets[axis])) - - save = input("To save this in new file press 'y' followed by [enter]\n") - if save == "y": - tree.write(filename + "-new") - print('Results saved in %s-new. Restart your dVRK application with the new file!' % filename) - print('To copy the new file over the existing one: cp %s-new %s' % (filename, filename)) - -if __name__ == '__main__': - # ros init node so we can use default ros arguments (e.g. __ns:= for namespace) - rospy.init_node('dvrk_calibrate_potentiometer') - # strip ros arguments - argv = rospy.myargv(argv=sys.argv) - - # parse arguments - parser = argparse.ArgumentParser() - parser.add_argument('-t', '--type', type=str, required=True, - choices=['scales', 'offsets'], - help = 'use either "scales" or "offsets"') - parser.add_argument('-a', '--arm', type=str, required=True, - choices=['ECM', 'MTML', 'MTMR', 'PSM1', 'PSM2', 'PSM3'], - help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace') - parser.add_argument('-c', '--config', type=str, required=True, - help = 'arm IO config file, i.e. 
something like sawRobotIO1394-xwz-12345.xml') - args = parser.parse_args(argv[1:]) # skip argv[0], script name +import os - calibration = potentiometer_calibration(args.arm) - calibration.run(args.type, args.config) +print('This program has been moved to the dvrk_python package.\nYou should use: rosrun dvrk_python {}'.format(os.path.basename(sys.argv[0]))) diff --git a/dvrk_robot/src/dvrk_console.cpp b/dvrk_robot/src/dvrk_console.cpp index b66cde49..c779e55c 100644 --- a/dvrk_robot/src/dvrk_console.cpp +++ b/dvrk_robot/src/dvrk_console.cpp @@ -5,7 +5,7 @@ Author(s): Anton Deguet Created on: 2015-07-18 - (C) Copyright 2015-2021 Johns Hopkins University (JHU), All Rights Reserved. + (C) Copyright 2015-2023 Johns Hopkins University (JHU), All Rights Reserved. --- begin cisst license - do not edit --- @@ -21,6 +21,7 @@ no warranty. The complete license can be found in license.txt and #include #include +#include #include #include @@ -37,7 +38,9 @@ dvrk::console::console(const std::string & name, const double & publish_rate_in_seconds, const double & tf_rate_in_seconds, mtsIntuitiveResearchKitConsole * mts_console): - mts_ros_crtk_bridge(name, node_handle), + mts_ros_crtk_bridge_provided(name, node_handle), + m_publish_rate(publish_rate_in_seconds), + m_tf_rate(tf_rate_in_seconds), m_console(mts_console) { // start creating components @@ -45,7 +48,7 @@ dvrk::console::console(const std::string & name, // create all ROS bridges std::string m_bridge_name = "dvrk_ros" + node_handle->getNamespace(); - clean_namespace(m_bridge_name); + cisst_ros_crtk::clean_namespace(m_bridge_name); // shared publish bridge m_pub_bridge = new mtsROSBridge(m_bridge_name, publish_rate_in_seconds, node_handle); @@ -61,75 +64,58 @@ dvrk::console::console(const std::string & name, } // arm topics - const mtsIntuitiveResearchKitConsole::ArmList::iterator - armEnd = m_console->mArms.end(); - mtsIntuitiveResearchKitConsole::ArmList::iterator armIter; - for (armIter = m_console->mArms.begin(); - armIter != armEnd; - ++armIter) { - if (!armIter->second->m_skip_ROS_bridge) { - const std::string name = armIter->first; - switch (armIter->second->m_type) { - case mtsIntuitiveResearchKitConsole::Arm::ARM_MTM: - case mtsIntuitiveResearchKitConsole::Arm::ARM_MTM_DERIVED: - // custom dVRK + for (auto armPair : m_console->mArms) { + auto arm = *(armPair.second); + if (!arm.m_skip_ROS_bridge) { + const std::string name = armPair.first; + if (arm.native_or_derived_mtm()) { bridge_interface_provided_mtm(name, "Arm", publish_rate_in_seconds, tf_rate_in_seconds); - break; - case mtsIntuitiveResearchKitConsole::Arm::ARM_ECM: - case mtsIntuitiveResearchKitConsole::Arm::ARM_ECM_DERIVED: - // custom dVRK + } else if (arm.native_or_derived_ecm()) { bridge_interface_provided_ecm(name, "Arm", publish_rate_in_seconds, tf_rate_in_seconds); - if (armIter->second->m_simulation + if (arm.m_simulation == mtsIntuitiveResearchKitConsole::Arm::SIMULATION_NONE) { - add_topics_ecm_io(name, armIter->second->m_IO_component_name); + add_topics_ecm_io(name, arm.m_IO_component_name); } - break; - case mtsIntuitiveResearchKitConsole::Arm::ARM_PSM: - case mtsIntuitiveResearchKitConsole::Arm::ARM_PSM_DERIVED: - // custom dVRK + } else if (arm.native_or_derived_psm()) { bridge_interface_provided_psm(name, "Arm", publish_rate_in_seconds, tf_rate_in_seconds); - if (armIter->second->m_simulation + if (arm.m_simulation == mtsIntuitiveResearchKitConsole::Arm::SIMULATION_NONE) { - add_topics_psm_io(name, armIter->second->m_IO_component_name); + add_topics_psm_io(name, 
arm.m_IO_component_name); } - break; - case mtsIntuitiveResearchKitConsole::Arm::ARM_MTM_GENERIC: - case mtsIntuitiveResearchKitConsole::Arm::ARM_PSM_GENERIC: - case mtsIntuitiveResearchKitConsole::Arm::ARM_ECM_GENERIC: - // standard CRTK - bridge_interface_provided(armIter->second->ComponentName(), - armIter->second->InterfaceName(), + } else if (arm.generic()) { + bridge_interface_provided(arm.ComponentName(), + arm.InterfaceName(), publish_rate_in_seconds, tf_rate_in_seconds); - break; - case mtsIntuitiveResearchKitConsole::Arm::ARM_SUJ: - { - const auto _sujs = std::list({"PSM1", "PSM2", "PSM3", "ECM"}); - for (auto const & _suj : _sujs) { - bridge_interface_provided(name, - _suj, - "SUJ/" + _suj, - publish_rate_in_seconds, - tf_rate_in_seconds); - } + } else if (arm.suj()) { + const auto _sujs = std::list({"PSM1", "PSM2", "PSM3", "ECM"}); + for (auto const & _suj : _sujs) { + bridge_interface_provided(name, + _suj, + "SUJ/" + _suj, + publish_rate_in_seconds, + tf_rate_in_seconds); } - break; - default: - break; } } } // ECM teleop if (m_console->mTeleopECM) { - add_topics_teleop_ecm(m_console->mTeleopECM->Name()); + const auto teleopName = m_console->mTeleopECM->Name(); + bridge_interface_provided(teleopName, "Setting", teleopName, + publish_rate_in_seconds, tf_rate_in_seconds); + add_topics_teleop_ecm(teleopName); } // PSM teleops for (auto const & teleop : m_console->mTeleopsPSM) { - add_topics_teleop_psm(teleop.first); + const auto teleopName = teleop.first; + bridge_interface_provided(teleopName, "Setting", teleopName, + publish_rate_in_seconds, tf_rate_in_seconds); + add_topics_teleop_psm(teleopName); } // Endoscope focus @@ -139,14 +125,9 @@ dvrk::console::console(const std::string & name, // digital inputs const std::string footPedalsNameSpace = "footpedals/"; - typedef mtsIntuitiveResearchKitConsole::DInputSourceType DInputSourceType; - const DInputSourceType::const_iterator inputsEnd = m_console->mDInputSources.end(); - DInputSourceType::const_iterator inputsIter; - for (inputsIter = m_console->mDInputSources.begin(); - inputsIter != inputsEnd; - ++inputsIter) { - std::string upperName = inputsIter->second.second; - std::string lowerName = inputsIter->first; + for (auto input : m_console->mDInputSources) { + std::string upperName = input.second.second; + std::string lowerName = input.first; std::string requiredInterfaceName = upperName + "_" + lowerName; // put everything lower case std::transform(lowerName.begin(), lowerName.end(), lowerName.begin(), tolower); @@ -157,7 +138,7 @@ dvrk::console::console(const std::string & name, (requiredInterfaceName, "Button", footPedalsNameSpace + lowerName); componentManager->Connect(events_bridge().GetName(), requiredInterfaceName, - inputsIter->second.first, inputsIter->second.second); + input.second.first, input.second.second); } @@ -216,7 +197,7 @@ void dvrk::console::bridge_interface_provided_arm(const std::string & _arm_name, // bridged (e.g. 
subscribers and events) const std::string _required_interface_name = _arm_name + "_using_" + _interface_name; - subscribers_bridge().AddSubscriberToCommandWrite + subscribers_bridge().AddSubscriberToCommandWrite (_required_interface_name, "set_base_frame", _arm_name + "/set_base_frame"); subscribers_bridge().AddSubscriberToCommandWrite @@ -231,9 +212,10 @@ void dvrk::console::bridge_interface_provided_arm(const std::string & _arm_name, subscribers_bridge().AddSubscriberToCommandWrite (_required_interface_name, "body/set_cf_orientation_absolute", _arm_name + "/body/set_cf_orientation_absolute"); - subscribers_bridge().AddSubscriberToCommandWrite - (_required_interface_name, "set_cartesian_impedance_gains", - _arm_name + "/set_cartesian_impedance_gains"); + subscribers_bridge().AddServiceFromCommandQualifiedRead + (_required_interface_name, "actuator_to_joint_position", + _arm_name + "/actuator_to_joint_position"); events_bridge().AddPublisherFromEventWrite (_required_interface_name, "desired_state", @@ -331,11 +313,11 @@ void dvrk::console::bridge_interface_provided_psm(const std::string & _arm_name, const std::string _required_interface_name = _arm_name + "_using_" + _interface_name; subscribers_bridge().AddSubscriberToCommandWrite - (_required_interface_name, "set_adapter_present", - _arm_name + "/set_adapter_present"); + (_required_interface_name, "emulate_adapter_present", + _arm_name + "/emulate_adapter_present"); subscribers_bridge().AddSubscriberToCommandWrite - (_required_interface_name, "set_tool_present", - _arm_name + "/set_tool_present"); + (_required_interface_name, "emulate_tool_present", + _arm_name + "/emulate_tool_present"); subscribers_bridge().AddSubscriberToCommandWrite (_required_interface_name, "set_tool_type", _arm_name + "/set_tool_type"); @@ -486,6 +468,21 @@ void dvrk::console::add_topics_io(void) m_console->m_IO_component_name, "Configuration"); } +void dvrk::console::add_topics_pid(void) +{ + for (auto armPair : m_console->mArms) { + const auto name = armPair.first; + const auto arm = *(armPair.second); + if (arm.expects_PID()) { + const auto pid_component_name = name + "-PID"; + bridge_interface_provided(pid_component_name, + "Monitoring", + name + "/pid", + m_publish_rate, m_tf_rate); + } + } +} + void dvrk::console::add_topics_arm_io(mtsROSBridge * _pub_bridge, const std::string & _arm_name, const std::string & _io_component_name) @@ -493,20 +490,26 @@ void dvrk::console::add_topics_arm_io(mtsROSBridge * _pub_bridge, const std::string _ros_namespace = _arm_name + "/io/"; const std::string _interface_name = _arm_name + "-io"; _pub_bridge->AddPublisherFromCommandRead - (_interface_name, "GetAnalogInputPosSI", - _ros_namespace + "/analog_input_pos_si"); + (_interface_name, "pot/measured_js", + _ros_namespace + "pot/measured_js"); _pub_bridge->AddPublisherFromCommandRead (_interface_name, "measured_js", - _ros_namespace + "/joint_measured_js"); + _ros_namespace + "actuator/measured_js"); + _pub_bridge->AddPublisherFromCommandRead + (_interface_name, "software/measured_js", + _ros_namespace + "software/measured_js"); _pub_bridge->AddPublisherFromCommandRead - (_interface_name, "actuator_measured_js", - _ros_namespace + "/actuator_measured_js"); + (_interface_name, "firmware/measured_js", + _ros_namespace + "firmware/measured_js"); _pub_bridge->AddPublisherFromCommandRead (_interface_name, "GetActuatorFeedbackCurrent", - _ros_namespace + "/actuator_measured_current"); + _ros_namespace + "actuator/measured_current"); _pub_bridge->AddPublisherFromCommandRead 
(_interface_name, "GetActuatorRequestedCurrent", - _ros_namespace + "/actuator_servo_current"); + _ros_namespace + "actuator/servo_current"); + _pub_bridge->AddPublisherFromCommandRead + (_interface_name, "GetActuatorTimestamp", + _ros_namespace + "timestamp"); m_connections.Add(_pub_bridge->GetName(), _interface_name, _io_component_name, _arm_name); @@ -519,12 +522,10 @@ void dvrk::console::add_topics_ecm_io(const std::string & _arm_name, const auto events = std::list >({ {"ManipClutch", "manip_clutch"}, {"SUJClutch", "suj_clutch"}}); - for (auto event = events.begin(); - event != events.end(); - ++event) { - std::string _interface_name = _arm_name + "-" + event->first; + for (auto event : events) { + std::string _interface_name = _arm_name + "-" + event.first; events_bridge().AddPublisherFromEventWrite - (_interface_name, "Button", _arm_name + "/io/" + event->second); + (_interface_name, "Button", _arm_name + "/io/" + event.second); m_connections.Add(events_bridge().GetName(), _interface_name, _io_component_name, _interface_name); } @@ -539,12 +540,10 @@ void dvrk::console::add_topics_psm_io(const std::string & _arm_name, {"SUJClutch", "suj_clutch"}, {"Adapter", "adapter"}, {"Tool", "tool"}}); - for (auto event = events.begin(); - event != events.end(); - ++event) { - std::string _interface_name = _arm_name + "-" + event->first; + for (auto event : events) { + std::string _interface_name = _arm_name + "-" + event.first; events_bridge().AddPublisherFromEventWrite - (_interface_name, "Button", _arm_name + "/io/" + event->second); + (_interface_name, "Button", _arm_name + "/io/" + event.second); m_connections.Add(events_bridge().GetName(), _interface_name, _io_component_name, _interface_name); } @@ -553,7 +552,7 @@ void dvrk::console::add_topics_psm_io(const std::string & _arm_name, void dvrk::console::add_topics_teleop_ecm(const std::string & _name) { std::string _ros_namespace = _name; - clean_namespace(_ros_namespace); + cisst_ros_crtk::clean_namespace(_ros_namespace); // messages events_bridge().AddLogFromEventWrite(_name + "-log", "error", @@ -580,9 +579,6 @@ void dvrk::console::add_topics_teleop_ecm(const std::string & _name) _name, "Setting"); // commands - subscribers_bridge().AddSubscriberToCommandWrite - (_name, "state_command", - _ros_namespace + "/state_command"); subscribers_bridge().AddSubscriberToCommandWrite (_name, "set_scale", _ros_namespace + "/set_scale"); @@ -594,7 +590,7 @@ void dvrk::console::add_topics_teleop_ecm(const std::string & _name) void dvrk::console::add_topics_teleop_psm(const std::string & _name) { std::string _ros_namespace = _name; - clean_namespace(_ros_namespace); + cisst_ros_crtk::clean_namespace(_ros_namespace); // messages events_bridge().AddLogFromEventWrite(_name + "-log", "error", @@ -637,9 +633,6 @@ void dvrk::console::add_topics_teleop_psm(const std::string & _name) _name, "Setting"); // commands - subscribers_bridge().AddSubscriberToCommandWrite - (_name, "state_command", - _ros_namespace + "/state_command"); subscribers_bridge().AddSubscriberToCommandWrite (_name, "lock_translation", _ros_namespace + "/lock_translation"); @@ -652,9 +645,6 @@ void dvrk::console::add_topics_teleop_psm(const std::string & _name) subscribers_bridge().AddSubscriberToCommandWrite (_name, "set_align_mtm", _ros_namespace + "/set_align_mtm"); - subscribers_bridge().AddSubscriberToCommandWrite - (_name, "set_registration_rotation", - _ros_namespace + "/set_registration_rotation"); // connect m_connections.Add(subscribers_bridge().GetName(), _name, _name, "Setting"); 
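For reference, a minimal sketch (using the standard ROS 1 command line tools) of how the remapped topics above could be inspected at runtime. The arm name `PSM1` and the default namespace are assumptions, the layout follows `add_topics_arm_io`, `add_topics_pid` and the renamed `emulate_*` subscribers in this file, and the PID topics only appear when the extra PID bridge is enabled (see the `pid-topics` option added to `dvrk_console_json.cpp` below).

```sh
# IO feedback is now grouped under <arm>/io/ with pot/actuator/software/firmware sub-namespaces
rostopic echo -n 1 /PSM1/io/pot/measured_js
rostopic echo -n 1 /PSM1/io/actuator/measured_current

# PID monitoring topics are bridged under <arm>/pid when the PID bridge is enabled;
# the exact names depend on what the PID component's "Monitoring" interface provides
rostopic list | grep /PSM1/pid

# the adapter/tool simulation subscribers were renamed from set_* to emulate_*
rostopic info /PSM1/emulate_tool_present
```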
diff --git a/dvrk_robot/src/dvrk_console_json.cpp b/dvrk_robot/src/dvrk_console_json.cpp index 99af0b13..9b161a7d 100644 --- a/dvrk_robot/src/dvrk_console_json.cpp +++ b/dvrk_robot/src/dvrk_console_json.cpp @@ -5,7 +5,7 @@ Author(s): Anton Deguet Created on: 2015-07-18 - (C) Copyright 2015-2021 Johns Hopkins University (JHU), All Rights Reserved. + (C) Copyright 2015-2023 Johns Hopkins University (JHU), All Rights Reserved. --- begin cisst license - do not edit --- @@ -25,7 +25,7 @@ no warranty. The complete license can be found in license.txt and #include #include #include -#include +#include #include #include #include @@ -43,16 +43,21 @@ no warranty. The complete license can be found in license.txt and #include #include -void fileExists(const std::string & description, const std::string & filename) +void fileExists(const std::string & description, std::string & filename, + mtsIntuitiveResearchKitConsole * console) { if (!cmnPath::Exists(filename)) { - std::cerr << "File not found: " << description - << "; " << filename << std::endl; - exit(-1); - } else { - std::cout << "File found: " << description - << "; " << filename << std::endl; + const std::string fileInPath = console->locate_file(filename); + if (fileInPath == "") { + std::cerr << "File not found: " << description + << ": " << filename << std::endl; + exit(-1); + } else { + filename = fileInPath; + } } + std::cout << "Using file: " << description + << ": " << filename << std::endl; } int main(int argc, char ** argv) @@ -61,17 +66,7 @@ int main(int argc, char ** argv) std::setlocale(LC_ALL, "C"); // log configuration - cmnLogger::SetMask(CMN_LOG_ALLOW_ALL); - cmnLogger::SetMaskDefaultLog(CMN_LOG_ALLOW_ALL); - cmnLogger::SetMaskFunction(CMN_LOG_ALLOW_ALL); - cmnLogger::SetMaskClassMatching("mtsIntuitiveResearchKit", CMN_LOG_ALLOW_ALL); - cmnLogger::AddChannel(std::cerr, CMN_LOG_ALLOW_ERRORS_AND_WARNINGS); - // add log file with date so logs don't get overwritten - std::string currentDateTime; - osaGetDateTimeString(currentDateTime); - std::ofstream logFileStream(std::string("cisstLog-" + currentDateTime + ".txt").c_str()); - cmnLogger::AddChannel(logFileStream); - cmnLogger::HaltDefaultLog(); // stop log to default cisstLog.txt + mtsIntuitiveResearchKit::Logger logger; // create ROS node handle ros::init(argc, argv, "dvrk", ros::init_options::AnonymousName); @@ -103,6 +98,9 @@ int main(int argc, char ** argv) "json config file to configure ROS bridges to collect low level data (IO)", cmnCommandLineOptions::OPTIONAL_OPTION, &jsonIOConfigFiles); + options.AddOptionNoValue("I", "pid-topics", + "add some extra publishers to monitor PID state"); + options.AddOptionNoValue("t", "text-only", "text only interface, do not create Qt widgets"); @@ -125,10 +123,7 @@ int main(int argc, char ** argv) "replaces the default Qt palette with darker colors"); // check that all required options have been provided - std::string errorMessage; - if (!options.Parse(argc, argv, errorMessage)) { - std::cerr << "Error: " << errorMessage << std::endl; - options.PrintUsage(std::cerr); + if (!options.Parse(argc, argv, std::cerr)) { return -1; } std::string arguments; @@ -143,7 +138,7 @@ int main(int argc, char ** argv) // console mtsIntuitiveResearchKitConsole * console = new mtsIntuitiveResearchKitConsole("console"); console->set_calibration_mode(options.IsSet("calibration-mode")); - fileExists("console JSON configuration file", jsonMainConfigFile); + fileExists("console JSON configuration file", jsonMainConfigFile, console); 
console->Configure(jsonMainConfigFile); componentManager->AddComponent(console); console->Connect(); @@ -174,7 +169,7 @@ int main(int argc, char ** argv) // configure data collection if needed if (options.IsSet("collection-config")) { // make sure the json config file exists - fileExists("JSON data collection configuration", jsonCollectionConfigFile); + fileExists("JSON data collection configuration", jsonCollectionConfigFile, console); mtsCollectorFactory * collectorFactory = new mtsCollectorFactory("collectors"); collectorFactory->Configure(jsonCollectionConfigFile); @@ -204,15 +199,19 @@ int main(int argc, char ** argv) publishPeriod, tfPeriod, console); // IOs - const std::list::const_iterator end = jsonIOConfigFiles.end(); - std::list::const_iterator iter; + const std::list::iterator end = jsonIOConfigFiles.end(); + std::list::iterator iter; for (iter = jsonIOConfigFiles.begin(); iter != end; iter++) { - fileExists("ROS IO JSON configuration file", *iter); + fileExists("ROS IO JSON configuration file", *iter, console); consoleROS->Configure(*iter); } + if (options.IsSet("pid-topics")) { + consoleROS->add_topics_pid(); + } + componentManager->AddComponent(consoleROS); consoleROS->Connect(); @@ -238,8 +237,7 @@ int main(int argc, char ** argv) componentManager->Cleanup(); // stop all logs - cmnLogger::Kill(); - cmnLogger::RemoveChannel(logFileStream); + logger.Stop(); // stop ROS node ros::shutdown(); diff --git a/dvrk_robot/video.md b/dvrk_robot/video.md index 33af3a30..0428f9da 100644 --- a/dvrk_robot/video.md +++ b/dvrk_robot/video.md @@ -1,234 +1 @@ -Video pipeline -============== - -This describes a fairly low cost setup that can be used with the dVRK HRSV display (High Resolution Stereo Video). We use a couple of cheap USB frame grabbers (Hauppage Live 2) for the analog videos from SD cameras. For HD systems, we tested a BlackMagic DeckLink Duo frame grabber with dual SDI inputs (see also the dVRK video pipeline page for the hardware setup: https://github.com/jhu-dvrk/sawIntuitiveResearchKit/wiki/Video-Pipeline). For displaying the video back, we just use a graphic card with two spare video outputs. The software relies heavily on ROS tools to grab and display the stereo video. Some lag is to be expected. - -The general steps are: - * Make sure the frame grabber works (e.g. using tvtime or vendor application) - * Figure out the gstreamer pipeline and test using `gst-launch-1.0` - * Create a lauch file for gscam with the gstreamer pipeline you just tested - -# Disclaimer - -This page is a collection of notes that might be helpful for the dVRK community but it is in no way exhaustive. If you need some help re. gstreamer and gscam, you should probably start searching online and/or reach out to the gstreamer and gscam developers. - -# Hardware - -## USB frame grabbers - -The frame grabbers we use most often are Hauppage USB Live 2: - * Manufacturer: http://www.hauppauge.com/site/products/data_usblive2.html - * Amazon, about $45: http://www.amazon.com/Hauppauge-610-USB-Live-Digitizer-Capture/dp/B0036VO2BI - -When you plug these in your PC, make sure both frame grabbers are on -different USB channels otherwise you won't have enough bandwidth to -capture both left and right videos. To check, use `lsusb`. The output should look like: -```sh -$> lsusb -Bus 004 Device 006: ID 0461:4e22 Primax Electronics, Ltd -Bus 004 Device 005: ID 413c:2107 Dell Computer Corp. -Bus 004 Device 004: ID 0424:2514 Standard Microsystems Corp. 
USB 2.0 Hub -Bus 004 Device 003: ID 2040:c200 Hauppauge -Bus 004 Device 002: ID 8087:0024 Intel Corp. Integrated Rate Matching Hub -Bus 004 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub -Bus 002 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub -Bus 001 Device 002: ID 2040:c200 Hauppauge -Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub -Bus 003 Device 002: ID 8087:0024 Intel Corp. Integrated Rate Matching Hub -Bus 003 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub -``` -In this example, the Hauppage frame grabbers are on bus `004` and `001`. - -To check if the frame grabbers are working, one can use `tvtime` -(available on most Linux distributions). The two frame grabbers -should appear in `/dev` with the prefix `video`. For example: - -```sh -$> ls /dev/video* -/dev/video0 /dev/video1 -``` - -The numbering (i.e. which frame grabber is `/dev/video0` and which one -is `/dev/video1`) depends on the order the grabbers are plugged in. -To have a consistent ordering, always plug the frame grabbers in the -same order, e.g. first the left channel and then the right channel. Alternatively, you can setup `udev` rules to automatically assign a device name for a specific frame grabber identified by serial number (see below). - -Some Linux distributions might restrict access to the video devices using the `video` group. To check, do: -```sh -ls -l /dev/video* -``` -If the result shows something like: -```sh -crw-rw----+ 1 root video 81, 0 Nov 14 11:47 /dev/video0 -``` -you will need to add your user id to the `video` group. Do not use `sudo tvtime`, it might work for `tvtime` but it's not going to work with `gscam`. You should fix the unix file permissions first and make sure you can access the video without `sudo`. - -To test each channel one after another: -```sh -tvtime -Ld /dev/video0 -``` -Then: -```sh -tvtime -Ld /dev/video1 -``` - -Once in `tvtime`, change the input to S-Video by pressing `i` key. If you see a black -image, it's possible that you don't have enough light in front of your -camera or endoscope. If you happen to use a real da Vinci endoscope -and CCUs (Camera Control Units), you can use the toggle switch -`CAM/BAR` to use the video test pattern -(https://en.wikipedia.org/wiki/SMPTE_color_bars). - -Using the color bar is also useful to test your video connections, -i.e. if your video is noisy or not visible, put the CCUs in bar mode. -If the video is still not working, the problem likely comes from your -S-video cables. If the color bars show correctly, the problem comes -from the cables to the endoscope or the endoscope itself. - -Once you have the video showing in tvtime, you need to figure out the gstreamer options. There is some information online and you can use `gst-inspect-1.0` (see more details in DeckLink Duo section). You can also use the command line tool `v4l2-ctl` to figure out the output format of your frame grabber. The option `-d0` is to specify `/dev/video0`, if you're using a different device, make sure the digit matches. Example of commands: -```sh -v4l2-ctl -d0 --get-fmt-video -v4l2-ctl -d0 --list-formats-ext -``` - -On Ubuntu 18.04, we found the following syntax seems to work with the USB Hauppage Live2: -```sh - gst-launch-1.0 v4l2src device=/dev/video0 ! video/x-raw,interlace-mode=interleaved ! autovideosink - ``` - -To setup a `udev` rule, you first need to find a way to uniquely identify each frame grabber. To start, plug just one frame grabber then do `ls /dev/video*`. 
Use the full path to identify each frame grabber (e.g. `/dev/video0`, `/dev/video1`...): -``` -udevadm info --attribute-walk /dev/video0 -``` -Scroll through the output to find the serial number: -``` - ATTR{manufacturer}=="Hauppauge" - ... - ATTR{serial}=="0011485772" -``` -Note that this info should correspond to the messages in `dmesg -w` when you plugged your frame grabber. Now we can create a `udev` rule to automatically assign the frame grabber to a specific `/dev/video` "device". You can write the rules in `/etc/udev/rules.d/90-hauppauge.rules` using sudo privileges, replace the serial numbers with yours, the following example is for a stereo system: -``` -SUBSYSTEM=="video4linux", ATTRS{manufacturer}=="Hauppauge", ATTRS{serial}=="0011367747", SYMLINK+="video-left" -SUBSYSTEM=="video4linux", ATTRS{manufacturer}=="Hauppauge", ATTRS{serial}=="0011485772", SYMLINK+="video-right" -``` -Save the file and then do `sudo udevadm control --reload-rules` to apply the rules. No need to reboot the computer, just unplug your frame grabber, wait a few seconds, replug it and then do `ls -l /dev/video*` to confirm that the rule worked. If this didn't work, these pages have some useful info for debugging `udev` and `video4linux` rules: https://linuxconfig.org/tutorial-on-how-to-write-basic-udev-rules-in-linux and https://unix.stackexchange.com/questions/424887/udev-rule-to-discern-2-identical-webcams-on-linux - -## Blackmagic DeckLink Duo frame grabber - -You first need to install the drivers from Blackmagic, see https://www.blackmagicdesign.com/support/family/capture-and-playback The drivers are included in the package "Desktop Video". Once you've downloaded the binaries and extracted the files from Blackmagic, follow the instructions on their ReadMe.txt. For 64 bits Ubuntu system, install the `.deb` files in subfolder `deb/x86_64`. If your card is old, the DeckLink install might ask to run the BlackMagic firmware updater, i.e. something like `BlackmagicFirmwareUpdater update 0`. After you reboot, check with `dmesg | grep -i black` to see if the card is recognized. If the driver is working properly, the devices will show up under `/dev/blackmagic`. - -To test if the drivers are working and the cards are working, use gstreamer 1.0 or greater. On Ubuntu 16.04/18.04 both gstreamer 1.0 and 0.1 are available. Make sure you ONLY install the 1.0 packages. You will also need the proper gstreamer plugins installed: -```sh - sudo apt install gstreamer1.0-plugins-base gstreamer1.0-plugins-good gstreamer1.0-plugins-bad -``` - -Once gstreamer is installed, you can use a few command lines to test the drivers: - * `gst-inspect-1.0 decklinkvideosrc` will show you the different parameters for the Decklink gstreamer plugin - * `gst-launch-1.0` can be used to launch the streamer and pipe it to see the video live on the computer. For example, we used `gst-launch-1.0 -v decklinkvideosrc mode=0 connection=sdi device-number=0 ! videoconvert ! autovideosink`. - * `mode=0` is for auto detection and is optional - * `connection=sdi` is to force to use an SDI input if your card has different types of inputs. This is optional. - * `device-number=0` is to select which input to use if you have multiple inputs - * On a Decklink Duo, we found that one can see the stereo video using two text terminals: - * `gst-launch-1.0 decklinkvideosrc device-number=0 ! videoconvert ! autovideosink` - * `gst-launch-1.0 decklinkvideosrc device-number=1 ! videoconvert ! 
autovideosink` - -# Software - -## gscam - -`gscam` is a ROS node using the `gstreamer` library. The gstreamer -library supports a few frame grabbers including the Hauppage one. The -gstreamer developement library can be installed using `apt-get install`. Make sure you install gstreamer 1.0, not 0.1. It is important to note that when you're installing `gscam`, the dependencies will also be installed and you might install the wrong version of `gstreamer` without realizing it. - -To figure out if the ROS provided gscam uses gstreamer 0.1 or 1.x, use the command line: -```sh -apt-cache showpkg ros-kinetic-gscam # or ros-lunar-gscam or whatever ROS version -``` - -Look at the output of the `apt-cache showpkg` command and search the "Dependencies" to find the gstreamer version used. - -As far as we know, ROS Kinetic on Ubuntu 16.04 uses the gstreamer 0.1 so you will have to manually compile `gscam` to use gstreamer 1.x. Melodic on 18.04 seems to use gstreamer 1.x so you should be able install using `apt`. - -### ROS Ubuntu packages - -Use `apt install` to install gscam on Ubuntu 18.04. The package name should be `ros-melodic-gscam`. It will install all the required dependencies for you. - -### Manual compilation - -If you need gstreamer 1.x and gscam is not built against it, you need to manually compile it. You will first need to install some development libraries: -```sh -sudo apt-get install gstreamer1.0-tools libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libgstreamer-plugins-good1.0-dev -``` - -Then, assuming your catkin workspace is in `~/catkin` and you're using the Catkin Python build tools: - -```sh -cd ~/catkin_ws/src -git clone https://github.com/ros-drivers/gscam -catkin build -source ~/catkin_ws/devel/setup.bash -``` - -**Note:** See https://github.com/ros-drivers/gscam. As of March 2018, the readme on gscam/github are a bit confusing since they still indicate that gstreamer 1.x support is experimental but they provide instructions to compile with gstreamer 1.x. So, make sure you compile for 1.x version. - - -### Using gscam - -To start the `gscam` node, we provide a couple of ROS launch scripts. **Make sure the launch script has been updated to use a working gstreamer pipeline** (as descrided above using `gst-launch-1.01). The main difference is that your pipeline for gscam should end with `videoconvert` and you need to remove `autovideosink`. - -For a stereo system with the USB frame grabbers, use: -```sh -roslaunch dvrk_robot gscam_stereo.launch rig_name:=jhu_daVinci -``` -Where `jhu_daVinci` is a name you want to give to your camera rig. This name will be used to define the ROS namespace for all the data published. It is also used to define a directory to save the results of your camera calibration or load said camera calibration (i.e. `dvrk_robot/data/`). If you don't have a calibration for your rig, you can still render both video channels using the ROS topics: - * `/jhu_daVinci/left/image_raw` - * `/jhu_daVinci/right/image_raw` - -For a system with a Decklink Duo, the `gscam_config` in a launch script would look like: -```xml - -``` - -## (rqt_)image_view - -One can use the `image_view` node to visualize a single image: - -```sh -rosrun image_view image_view image:=/jhu_daVinci/right/image_raw -``` - -If you prefer GUI, you can use `rqt_image_view`, a simple program to -view the different camera topics. Pick the image to display using the -drop-down menu on the top left corner. - -## RViz - -Use RViz to display both channels at the same time. 
Add image, select -topic and then drop image to separate screen/eye on the HRSV display. -You can save your settings to everytime you start RViz you will have -both images. - -## Camera calibration - -```sh -roslaunch dvrk_robot gscam_stereo.launch rig_name:=jhu_daVinci rect:=false -``` - -To start the camera calibration: -``` -# ros camera calibration -# NOTE: checkerboard 11x10 square with = 5 mm -rosrun camera_calibration cameracalibrator.py --size 11x10 --square 0.005 right:=/jhu_daVinci/right/image_raw left:=/jhu_daVinci/left/image_raw right_camera:=/jhu_daVinci/right left_camera:=/jhu_daVinci/left --approximate=0.050 -``` - -Save results: - * manual save: calibration result is located /tmp/calibrationdata.tar.gz - * commit button to save (note camera_info_url should be correct) - * calibration result is saved as `ost.txt`, which is Videre INI format - * `gscam` requires calibration file with file extension `.ini` or `.yaml`. - * `gscam` only takes package://dvrk_robot/data/ - * from now on, use topics `image_rect_color` - -References: - * http://wiki.ros.org/camera_calibration - * http://wiki.ros.org/camera_calibration_parsers +This file has been moved to the dvrk_video package. See README.md in directory dvrk_video. \ No newline at end of file diff --git a/dvrk_ros.rosinstall b/dvrk_ros.rosinstall deleted file mode 100644 index 73575d79..00000000 --- a/dvrk_ros.rosinstall +++ /dev/null @@ -1,52 +0,0 @@ -- git: - local-name: cisst-saw/cisstNetlib - uri: https://github.com/jhu-cisst/cisstNetlib - version: master -- git: - local-name: cisst-saw/cisst - uri: https://github.com/jhu-cisst/cisst - version: 1.1.0 -- git: - local-name: cisst-saw/cisst-ros - uri: https://github.com/jhu-cisst/cisst-ros - version: 2.0.0 -- git: - local-name: cisst-saw/sawRobotIO1394 - uri: https://github.com/jhu-saw/sawRobotIO1394 - version: 2.1.0 -- git: - local-name: cisst-saw/sawControllers - uri: https://github.com/jhu-saw/sawControllers - version: 2.0.0 -- git: - local-name: cisst-saw/sawTextToSpeech - uri: https://github.com/jhu-saw/sawTextToSpeech - version: 1.3.0 -- git: - local-name: cisst-saw/sawKeyboard - uri: https://github.com/jhu-saw/sawKeyboard - version: 1.3.0 -- git: - local-name: crtk/crtk_msgs - uri: https://github.com/collaborative-robotics/crtk_msgs - version: 1.0.0 -- git: - local-name: crtk/crtk_python_client - uri: https://github.com/collaborative-robotics/crtk_python_client - version: 1.1.0 -- git: - local-name: crtk/crtk_matlab_client - uri: https://github.com/collaborative-robotics/crtk_matlab_client - version: 1.1.0 -- git: - local-name: cisst-saw/sawIntuitiveResearchKit - uri: https://github.com/jhu-dvrk/sawIntuitiveResearchKit - version: 2.1.0 -- git: - local-name: dvrk-ros - uri: https://github.com/jhu-dvrk/dvrk-ros - version: 2.1.0 -- git: - local-name: dvrk-gravity-compensation - uri: https://github.com/jhu-dvrk/dvrk-gravity-compensation - version: 2.0.0 diff --git a/dvrk_video/CMakeLists.txt b/dvrk_video/CMakeLists.txt new file mode 100644 index 00000000..39aff686 --- /dev/null +++ b/dvrk_video/CMakeLists.txt @@ -0,0 +1,9 @@ +cmake_minimum_required (VERSION 3.1) +project (dvrk_video VERSION 2.2.0) + +find_package ( + catkin REQUIRED + ) + +catkin_package ( + ) diff --git a/dvrk_video/README.md b/dvrk_video/README.md new file mode 100644 index 00000000..5b9e7674 --- /dev/null +++ b/dvrk_video/README.md @@ -0,0 +1,338 @@ +Video pipeline +============== + +This describes a fairly low cost setup that can be used with the dVRK +HRSV display (High Resolution Stereo Video). 
We use a couple of cheap +USB frame grabbers (Hauppage Live 2) for the analog videos from SD +cameras (640x480). For HD systems (720p and 1080p), we tested a +BlackMagic DeckLink Duo frame grabber with dual SDI inputs (see also +the dVRK video pipeline page for the hardware setup: +https://github.com/jhu-dvrk/sawIntuitiveResearchKit/wiki/Video-Pipeline). +For displaying the video back, we just use a graphic card with two +spare video outputs. The software relies heavily on ROS tools to grab +and display the stereo video. Some lag is to be expected. + +The general steps are: + * Make sure the frame grabber works (e.g. using tvtime or vendor application) + * Figure out the gstreamer pipeline and test using `gst-launch-1.0` + * Create a lauch file for gscam with the gstreamer pipeline you just tested + +# Disclaimer + +This page is a collection of notes that might be helpful for the dVRK +community but it is in no way exhaustive. If you need some help +re. gstreamer and gscam, you should probably start searching online +and/or reach out to the gstreamer and gscam developers. + +# Hardware + +## USB frame grabbers + +The frame grabbers we use most often for SD endoscopes are Hauppage USB Live 2: + * Manufacturer: http://www.hauppauge.com/site/products/data_usblive2.html + * Amazon, about $45: http://www.amazon.com/Hauppauge-610-USB-Live-Digitizer-Capture/dp/B0036VO2BI + +When you plug these in your PC, make sure both frame grabbers are on +different USB channels otherwise you won't have enough bandwidth to +capture both left and right videos. To check, use `lsusb`. The output should look like: +```sh +$> lsusb +Bus 004 Device 006: ID 0461:4e22 Primax Electronics, Ltd +Bus 004 Device 005: ID 413c:2107 Dell Computer Corp. +Bus 004 Device 004: ID 0424:2514 Standard Microsystems Corp. USB 2.0 Hub +Bus 004 Device 003: ID 2040:c200 Hauppauge +Bus 004 Device 002: ID 8087:0024 Intel Corp. Integrated Rate Matching Hub +Bus 004 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub +Bus 002 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub +Bus 001 Device 002: ID 2040:c200 Hauppauge +Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub +Bus 003 Device 002: ID 8087:0024 Intel Corp. Integrated Rate Matching Hub +Bus 003 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub +``` +In this example, the Hauppage frame grabbers are on bus `004` and `001`. + +To check if the frame grabbers are working, one can use `tvtime` +(available on most Linux distributions). The two frame grabbers +should appear in `/dev` with the prefix `video`. For example: + +```sh +$> ls /dev/video* +/dev/video0 /dev/video1 +``` + +The numbering (i.e. which frame grabber is `/dev/video0` and which one +is `/dev/video1`) depends on the order the grabbers are plugged in. +To have a consistent ordering, always plug the frame grabbers in the +same order, e.g. first the left channel and then the right channel. +Alternatively, you can setup `udev` rules to automatically assign a +device name for a specific frame grabber identified by serial number +(see below). + +Some Linux distributions might restrict access to the video devices using the `video` group. To check, do: +```sh +ls -l /dev/video* +``` +If the result shows something like: +```sh +crw-rw----+ 1 root video 81, 0 Nov 14 11:47 /dev/video0 +``` + +you will need to add your user id to the `video` group. Do not use +`sudo tvtime`, it might work for `tvtime` but it's not going to work +with `gscam`. 
You should fix the unix file permissions first and make +sure you can access the video without `sudo`. + +To test each channel one after another: +```sh +tvtime -Ld /dev/video0 +``` +Then: +```sh +tvtime -Ld /dev/video1 +``` + +Once in `tvtime`, change the input to S-Video by pressing `i` key. If you see a black +image, it's possible that you don't have enough light in front of your +camera or endoscope. If you happen to use a real da Vinci endoscope +and CCUs (Camera Control Units), you can use the toggle switch +`CAM/BAR` to use the video test pattern +(https://en.wikipedia.org/wiki/SMPTE_color_bars). + +Using the color bar is also useful to test your video connections, +i.e. if your video is noisy or not visible, put the CCUs in bar mode. +If the video is still not working, the problem likely comes from your +S-video cables. If the color bars show correctly, the problem comes +from the cables to the endoscope or the endoscope itself. + +Once you have the video showing in tvtime, you need to figure out the +gstreamer options. There is some information online and you can use +`gst-inspect-1.0` (see more details in DeckLink Duo section). You can +also use the command line tool `v4l2-ctl` to figure out the output +format of your frame grabber. The option `-d0` is to specify +`/dev/video0`, if you're using a different device, make sure the digit +matches. Example of commands: + +```sh +v4l2-ctl -d0 --get-fmt-video +v4l2-ctl -d0 --list-formats-ext +``` + +On Ubuntu 18.04, we found the following syntax seems to work with the USB Hauppage Live2: +```sh + gst-launch-1.0 v4l2src device=/dev/video0 ! video/x-raw,interlace-mode=interleaved ! autovideosink + ``` + +To setup a `udev` rule, you first need to find a way to uniquely +identify each frame grabber. To start, plug just one frame grabber +then do `ls /dev/video*`. Use the full path to identify each frame +grabber (e.g. `/dev/video0`, `/dev/video1`...): + +``` +udevadm info --attribute-walk /dev/video0 +``` +Scroll through the output to find the serial number: +``` + ATTR{manufacturer}=="Hauppauge" + ... + ATTR{serial}=="0011485772" +``` + +Note that this info should correspond to the messages in `dmesg -w` +when you plugged your frame grabber. Now we can create a `udev` rule +to automatically assign the frame grabber to a specific `/dev/video` +"device". You can write the rules in +`/etc/udev/rules.d/90-hauppauge.rules` using sudo privileges, replace +the serial numbers with yours, the following example is for a stereo +system: + +``` +SUBSYSTEM=="video4linux", ATTRS{manufacturer}=="Hauppauge", ATTRS{serial}=="0011367747", SYMLINK+="video-left" +SUBSYSTEM=="video4linux", ATTRS{manufacturer}=="Hauppauge", ATTRS{serial}=="0011485772", SYMLINK+="video-right" +``` + +Save the file and then do `sudo udevadm control --reload-rules` to +apply the rules. No need to reboot the computer, just unplug your +frame grabber, wait a few seconds, replug it and then do `ls -l +/dev/video*` to confirm that the rule worked. If this didn't work, +these pages have some useful info for debugging `udev` and +`video4linux` rules: +https://linuxconfig.org/tutorial-on-how-to-write-basic-udev-rules-in-linux +and +https://unix.stackexchange.com/questions/424887/udev-rule-to-discern-2-identical-webcams-on-linux + +## Blackmagic DeckLink Duo frame grabber + +You first need to install the drivers from Blackmagic, see +https://www.blackmagicdesign.com/support/family/capture-and-playback +The drivers are included in the package "Desktop Video". 
Once you've
+downloaded the binaries and extracted the files from Blackmagic,
+follow the instructions in their ReadMe.txt. For a 64-bit Ubuntu
+system, install the `.deb` files in subfolder `deb/x86_64` using `sudo
+dpkg -i *.deb`. If your card is old, the DeckLink install might ask
+to run the BlackMagic firmware updater, i.e. something like
+`BlackmagicFirmwareUpdater update 0`. After you reboot, check with
+`dmesg | grep -i black` to see if the card is recognized. If the
+driver is working properly, the devices will show up under
+`/dev/blackmagic`.
+
+You can quickly test the frame grabber using `MediaExpress`, which
+should be installed along with the drivers. You can also select the video
+input using `BlackmagicDesktopVideoSetup` (also installed with the
+drivers).
+
+If you need to remove all the Blackmagic packages to test a different
+version, use `sudo apt remove desktopvideo* mediaexpress*`.
+
+To test if the drivers and the cards are working, use
+gstreamer 1.0 or greater. On Ubuntu 16.04/18.04 both gstreamer 1.0
+and 0.1 are available. Make sure you ONLY install the 1.0 packages.
+You will also need the proper gstreamer plugins installed:
+
+```sh
+ sudo apt install gstreamer1.0-plugins-base gstreamer1.0-plugins-good gstreamer1.0-plugins-bad
+```
+
+Once gstreamer is installed, you can use a few command lines to test the drivers:
+ * `gst-inspect-1.0 decklinkvideosrc` will show you the different parameters for the Decklink gstreamer plugin
+ * `gst-launch-1.0` can be used to launch the streamer and pipe it to see the video live on the computer. For example, we used `gst-launch-1.0 -v decklinkvideosrc mode=0 connection=sdi device-number=0 ! videoconvert ! autovideosink`.
+   * `mode=0` is for auto detection and is optional
+   * `connection=sdi` is to force the use of an SDI input if your card has different types of inputs. This is optional.
+   * `device-number=0` is to select which input to use if you have multiple inputs
+ * On a Decklink Duo, we found that one can see the stereo video using two text terminals:
+   * `gst-launch-1.0 decklinkvideosrc device-number=0 ! videoconvert ! autovideosink`
+   * `gst-launch-1.0 decklinkvideosrc device-number=1 ! videoconvert ! autovideosink`
+
+**Note:** For some reason, in Ubuntu 20.04 you need to add all users to the `plugdev` group. It's a bit odd since `/dev/blackmagic/*` has `rw` permissions for all users. If you figure out a fix, let us know.
+
+# Software
+
+## gscam
+
+`gscam` is a ROS node using the `gstreamer` library. The gstreamer
+library supports a few frame grabbers including the Hauppauge one. The
+gstreamer development library can be installed using `apt-get
+install`. Make sure you install gstreamer 1.0, not 0.1. It is
+important to note that when you're installing `gscam`, the
+dependencies will also be installed and you might install the wrong
+version of `gstreamer` without realizing it.
+
+To figure out if the ROS provided gscam uses gstreamer 0.1 or 1.x, use the command line:
+```sh
+apt-cache showpkg ros-kinetic-gscam # or ros-lunar-gscam, melodic or whatever ROS version
+```
+
+Look at the output of the `apt-cache showpkg` command and search the "Dependencies" to find the gstreamer version used.
+
+As far as we know, ROS Kinetic on Ubuntu 16.04 uses the gstreamer 0.1
+so you will have to manually compile `gscam` to use gstreamer 1.x.
+Melodic on 18.04 seems to use gstreamer 1.x so you should be able to
+install using `apt`.
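As a quick sanity check, the gstreamer flavor pulled in by the packaged gscam can also be read directly from its dependency list; a sketch below, assuming the Melodic package name used above (adjust for your ROS distribution):

```sh
# list gscam's dependencies and keep only the gstreamer related ones
apt-cache depends ros-melodic-gscam | grep -i gstreamer

# confirm that only gstreamer 1.0 runtime packages are installed locally
dpkg -l | grep gstreamer1.0
```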
+
+### ROS Ubuntu packages vs build from source
+
+Use `apt install` to install gscam on Ubuntu 18.04. The package name should be `ros-melodic-gscam`. It will install all the required dependencies for you.
+
+On Ubuntu 20.04, gscam binaries are not available via `apt` so you
+will need to compile it in your ROS workspace. The original source
+code is on github: https://github.com/ros-drivers/gscam. But you need
+a different version which can be found using the pull request for
+Noetic Devel. So you need to clone https://github.com/hap1961/gscam
+in your `catkin_ws/src`. Then make sure you switch to the Noetic
+branch: `cd ~/catkin_ws/src/gscam; git checkout noetic-devel`. Then
+`catkin build`. This info is from June 2022; it might need to be
+updated.
+
+### Manual compilation
+
+If you need gstreamer 1.x and gscam is not built against it, you need to manually compile it. You will first need to install some development libraries:
+```sh
+sudo apt-get install gstreamer1.0-tools libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libgstreamer-plugins-good1.0-dev
+```
+
+Then, assuming your catkin workspace is in `~/catkin` and you're using the Catkin Python build tools:
+
+```sh
+cd ~/catkin_ws/src
+git clone https://github.com/ros-drivers/gscam
+catkin build
+source ~/catkin_ws/devel/setup.bash
+```
+
+**Note:** See https://github.com/ros-drivers/gscam. As of March 2018,
+ the readme on gscam/github is a bit confusing since it still
+ indicates that gstreamer 1.x support is experimental but it provides
+ instructions to compile with gstreamer 1.x. So, make sure you
+ compile for the 1.x version.
+
+
+### Using gscam
+
+To start the `gscam` node, we provide a couple of ROS launch scripts.
+**Make sure the launch script has been updated to use a working
+gstreamer pipeline** (as described above using `gst-launch-1.0`).
+The main difference is that your pipeline for gscam should end with
+`videoconvert` and you need to remove `autovideosink`.
+
+For a stereo system with the USB frame grabbers, use:
+```sh
+roslaunch dvrk_robot gscam_stereo.launch rig_name:=jhu_daVinci
+```
+Where `jhu_daVinci` is a name you want to give to your camera rig. This
+name will be used to define the ROS namespace for all the data
+published. It is also used to define a directory to save the results
+of your camera calibration or load said camera calibration
+(i.e. `dvrk_robot/data/`). If you don't have a calibration
+for your rig, you can still render both video channels using the ROS
+topics:
+
+ * `/jhu_daVinci/left/image_raw`
+ * `/jhu_daVinci/right/image_raw`
+
+For a system with a Decklink Duo, the `gscam_config` in a launch script would look like:
+```xml
+
+```
+
+**Note:** ROS topic names might change when upgrading from 18.04/Melodic to 20.04/Noetic.
+
+## (rqt_)image_view
+
+One can use the `image_view` node to visualize a single image:
+
+```sh
+rosrun image_view image_view image:=/jhu_daVinci/right/image_raw
+```
+
+If you prefer a GUI, you can use `rqt_image_view`, a simple program to
+view the different camera topics. Pick the image to display using the
+drop-down menu on the top left corner.
+
+## RViz
+
+Use RViz to display both channels at the same time. Add an image
+display, select the topic and then drop the image onto a separate
+screen/eye on the HRSV display.
+You can save your settings so that every time you start RViz you will have
+both images.
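As a rule of thumb for the `gscam_config` value mentioned in the "Using gscam" section above: take the `gst-launch-1.0` pipeline that worked during testing, keep everything up to and including `videoconvert`, and drop `autovideosink`. A sketch for a single DeckLink input, assuming `device-number=0`:

```sh
# pipeline verified interactively with gst-launch-1.0
gst-launch-1.0 -v decklinkvideosrc connection=sdi device-number=0 ! videoconvert ! autovideosink

# corresponding pipeline string to place in gscam_config (no sink, ends with videoconvert):
#   decklinkvideosrc connection=sdi device-number=0 ! videoconvert
```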
+ +## Camera calibration + +```sh +roslaunch dvrk_robot gscam_stereo.launch rig_name:=jhu_daVinci rect:=false +``` + +To start the camera calibration: +``` +# ros camera calibration +# NOTE: checkerboard 11x10 square with = 5 mm +rosrun camera_calibration cameracalibrator.py --size 11x10 --square 0.005 right:=/jhu_daVinci/right/image_raw left:=/jhu_daVinci/left/image_raw right_camera:=/jhu_daVinci/right left_camera:=/jhu_daVinci/left --approximate=0.050 +``` + +Save results: + * manual save: calibration result is located /tmp/calibrationdata.tar.gz + * commit button to save (note camera_info_url should be correct) + * calibration result is saved as `ost.txt`, which is Videre INI format + * `gscam` requires calibration file with file extension `.ini` or `.yaml`. + * `gscam` only takes package://dvrk_robot/data/ + * from now on, use topics `image_rect_color` + +References: + * http://wiki.ros.org/camera_calibration + * http://wiki.ros.org/camera_calibration_parsers diff --git a/dvrk_video/launch/gscam_decklink.launch b/dvrk_video/launch/gscam_decklink.launch new file mode 100644 index 00000000..e33dc051 --- /dev/null +++ b/dvrk_video/launch/gscam_decklink.launch @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dvrk_video/launch/gscam_decklink_stereo.launch b/dvrk_video/launch/gscam_decklink_stereo.launch new file mode 100644 index 00000000..a05fc25f --- /dev/null +++ b/dvrk_video/launch/gscam_decklink_stereo.launch @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dvrk_video/launch/gscam_hauppauge_live2.launch b/dvrk_video/launch/gscam_hauppauge_live2.launch new file mode 100644 index 00000000..46bc218f --- /dev/null +++ b/dvrk_video/launch/gscam_hauppauge_live2.launch @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dvrk_video/launch/gscam_mono.launch b/dvrk_video/launch/gscam_mono.launch new file mode 100644 index 00000000..2c4df3c0 --- /dev/null +++ b/dvrk_video/launch/gscam_mono.launch @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dvrk_video/launch/gscam_stereo.launch b/dvrk_video/launch/gscam_stereo.launch new file mode 100644 index 00000000..6f1dea3d --- /dev/null +++ b/dvrk_video/launch/gscam_stereo.launch @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dvrk_video/launch/gscam_v4l_test.launch b/dvrk_video/launch/gscam_v4l_test.launch new file mode 100644 index 00000000..fa15f865 --- /dev/null +++ b/dvrk_video/launch/gscam_v4l_test.launch @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + diff --git a/dvrk_video/launch/jhu_dVRK_video.launch b/dvrk_video/launch/jhu_dVRK_video.launch new file mode 100644 index 00000000..b8ffc527 --- /dev/null +++ b/dvrk_video/launch/jhu_dVRK_video.launch @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dvrk_video/launch/jhu_daVinci_video.launch b/dvrk_video/launch/jhu_daVinci_video.launch new file mode 100644 index 00000000..76882997 --- /dev/null +++ b/dvrk_video/launch/jhu_daVinci_video.launch @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dvrk_video/package.xml b/dvrk_video/package.xml new file mode 100644 index 00000000..a1af6950 --- /dev/null +++ b/dvrk_video/package.xml @@ -0,0 +1,16 @@ + + + dvrk_video + 2.2.0 + The dVRK video package + + Anton Deguet + CISST + + catkin + + + + + +
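With the launch files now living in `dvrk_video`, starting the video pipeline should only require pointing `roslaunch` at the new package; a sketch below, assuming the `rig_name` argument carries over unchanged from the `dvrk_robot` examples above:

```sh
# USB frame grabber based stereo rig
roslaunch dvrk_video gscam_stereo.launch rig_name:=jhu_daVinci

# DeckLink based stereo rig
roslaunch dvrk_video gscam_decklink_stereo.launch rig_name:=jhu_daVinci
```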