From ab5ffd504d1f1d63444a71e50387614062cffc1e Mon Sep 17 00:00:00 2001 From: Charles xu Date: Wed, 22 Nov 2023 18:02:40 -0800 Subject: [PATCH] added envionrmnet and camera capture code --- robot_infra/README.md | 2 +- robot_infra/camera/__init__.py | 0 robot_infra/camera/capture.py | 54 ++++++++++ robot_infra/camera/rs_capture.py | 99 +++++++++++++++++++ robot_infra/camera/video_capture.py | 65 ++++++++++++ robot_infra/spacemouse/spacemouse_teleop.py | 7 -- .../pcb_insertion_ddpg.py | 2 +- ...anka_rlpd_classifier_reward_multithread.py | 2 +- .../binpick_franka_fwbw_vice.py | 2 +- serl_examples/hybrid_pixel_bc/pixel_bc.py | 2 +- 10 files changed, 223 insertions(+), 12 deletions(-) create mode 100644 robot_infra/camera/__init__.py create mode 100644 robot_infra/camera/capture.py create mode 100644 robot_infra/camera/rs_capture.py create mode 100644 robot_infra/camera/video_capture.py diff --git a/robot_infra/README.md b/robot_infra/README.md index 2bc085f..2627c0b 100644 --- a/robot_infra/README.md +++ b/robot_infra/README.md @@ -19,4 +19,4 @@ To start using the robot, first power on the robot (small switch on the back of From there you should be able to navigate to `robot_infra` and then simply run `python franka_server.py`. This should start the impedence controller and the HTTP server. You can test that things are running by trying to move the end effector around, if the impedence controller is running it should be compliant. -Lastly, any code you write can interact with the robot via the gym interface defined in this repo under `env`. Simply run `pip install -e .` in the `env_franka` directory, and in your code simply initialize the env via `gym.make("Franka-{ENVIRONMENT NAME}-v0)`. \ No newline at end of file +Lastly, any code you write can interact with the robot via the gym interface defined in this repo under `env`. 
import queue
import threading


class Capture:
    """Bufferless video-capture base class.

    Spawns a daemon thread that continuously drains frames from the
    underlying source so that read_recent() always returns the most
    recent frame rather than a stale, buffered one.
    """

    def __init__(self, name):
        print("starting video stream", name)
        self.name = name

        # The queue holds at most the latest frame; the reader thread
        # discards older, unprocessed frames before putting a new one.
        self.q = queue.Queue()
        t = threading.Thread(target=self._reader)
        t.daemon = True  # don't block interpreter exit
        t.start()

    def _reader(self):
        """Read frames as soon as they are available, keeping only the
        most recent one. Exits when the source reports failure."""
        while True:
            ret, frame = self.read_frame()
            if not ret:
                break
            if not self.q.empty():
                try:
                    self.q.get_nowait()  # discard previous (unprocessed) frame
                except queue.Empty:
                    pass
            self.q.put(frame)

    def read_frame(self):
        """Return (ok, frame) from the underlying source.

        Subclasses must override this.
        """
        raise NotImplementedError

    def read_recent(self):
        """Block until a frame is available and return the newest one."""
        return self.q.get()


class MultiCapture:
    """Registry of named Capture streams with bulk frame access."""

    def __init__(self):
        self.streams = {}

    def add_stream(self, stream: Capture):
        """Register *stream* under its own name.

        Returns False (and leaves the registry unchanged) if a stream
        with the same name is already registered, True otherwise.
        """
        name = stream.name
        if name in self.streams:
            return False
        self.streams[name] = stream
        return True

    def read_recent(self, name):
        """Return the newest frame from the stream registered as *name*."""
        return self.streams[name].read_recent()

    def read_all(self):
        """Return a dict mapping stream name -> its newest frame.

        Fix: removed a leftover `import pdb; pdb.set_trace()` that halted
        every call at a debugger prompt.
        """
        return {name: self.read_recent(name) for name in self.streams}
import numpy as np
import pyrealsense2 as rs  # Intel RealSense cross-platform open-source API
import cv2

from capture import Capture


class RSCapture(Capture):
    """Capture backend for an Intel RealSense camera.

    Streams BGR color (and optionally z16 depth aligned to color) via the
    librealsense pipeline. read() returns (ok, frame) where frame is
    HxWx3 uint8 color, or HxWx4 with depth appended as the last channel
    when depth=True.
    """

    def get_device_serial_numbers(self):
        """Serial numbers of all currently connected RealSense devices."""
        devices = rs.context().devices
        return [d.get_info(rs.camera_info.serial_number) for d in devices]

    def __init__(self, name, serial_number, dim=(640, 480), fps=15, depth=False):
        self.name = name
        # Fail fast if the requested camera is not plugged in.
        assert serial_number in self.get_device_serial_numbers()
        self.serial_number = serial_number
        self.depth = depth
        self.pipe = rs.pipeline()
        self.cfg = rs.config()
        self.cfg.enable_device(self.serial_number)
        self.cfg.enable_stream(rs.stream.color, dim[0], dim[1], rs.format.bgr8, fps)
        if self.depth:
            self.cfg.enable_stream(rs.stream.depth, dim[0], dim[1], rs.format.z16, fps)
        self.profile = self.pipe.start(self.cfg)

        if self.depth:
            # Depth sensor's depth scale (see rs-align example for explanation).
            # NOTE(review): depth_scale is currently unused — confirm before deleting.
            depth_sensor = self.profile.get_device().first_depth_sensor()
            depth_scale = depth_sensor.get_depth_scale()

        # rs.align maps depth frames onto the color stream. Created
        # unconditionally because read() always calls self.align.process();
        # placing it inside the `if self.depth:` branch would raise
        # AttributeError for depth=False captures.
        align_to = rs.stream.color
        self.align = rs.align(align_to)

    def read(self):
        """Return (True, frame) on success, (False, None) otherwise."""
        frames = self.pipe.wait_for_frames()
        aligned_frames = self.align.process(frames)
        color_frame = aligned_frames.get_color_frame()
        if self.depth:
            depth_frame = aligned_frames.get_depth_frame()

        if color_frame.is_video_frame():
            image = np.asarray(color_frame.get_data())
            if self.depth and depth_frame.is_depth_frame():
                depth = np.expand_dims(np.asarray(depth_frame.get_data()), axis=2)
                return True, np.concatenate((image, depth), axis=-1)
            return True, image
        return False, None

    def close(self):
        """Stop the pipeline and release all configured streams."""
        self.pipe.stop()
        self.cfg.disable_all_streams()


def _show(cap):
    """Read one RGBD frame from *cap* and display color + depth colormap."""
    ok, img = cap.read()
    if not ok:
        return
    img = cv2.resize(img, (480, 640)[::-1])
    color_img = img[..., :3]
    depth_img = img[..., 3]
    depth_colormap = cv2.applyColorMap(
        cv2.convertScaleAbs(depth_img, alpha=0.03), cv2.COLORMAP_JET
    )
    cv2.imshow(cap.name, np.hstack((color_img, depth_colormap)) / 255.0)
    cv2.waitKey(1)


if __name__ == "__main__":
    caps = [
        RSCapture(name="side_1", serial_number="128422270679", dim=(640, 480), depth=True),
        RSCapture(name="side_2", serial_number="127122270146", dim=(640, 480), depth=True),
        RSCapture(name="wrist_1", serial_number="127122270350", dim=(640, 480), depth=True),
        RSCapture(name="wrist_2", serial_number="128422271851", dim=(640, 480), depth=True),
    ]
    # Fix: the original copy-pasted display code gated the 4th camera's
    # imshow on `succes2`, so wrist_2 frames were shown (or skipped) based
    # on side_2's read status. The per-camera loop removes that bug and
    # the 4x duplication.
    while True:
        for cap in caps:
            _show(cap)
import queue
import threading
import time


class VideoCapture:
    """Bufferless wrapper around a capture object (e.g. RSCapture).

    A daemon thread continuously drains cap.read() so that read() always
    returns the most recent frame instead of a stale, buffered one.

    Fixes vs. original: dropped an accidental `from calendar import c`
    autocomplete import, the dead `if 1:` / sys.path scaffolding, and
    the `name == None` comparison (use `is None`).
    """

    def __init__(self, cap, name=None):
        # Default the stream name to the underlying capture's name.
        if name is None:
            name = cap.name
        self.name = name
        self.q = queue.Queue()
        self.cap = cap
        self.t = threading.Thread(target=self._reader)
        self.t.daemon = True  # don't block interpreter exit
        self.enable = True    # cleared by close() to stop the reader loop
        self.t.start()

    def _reader(self):
        """Read frames as soon as they are available, keeping only the
        most recent one. Stops on read failure or after close()."""
        while self.enable:
            time.sleep(0.01)  # throttle polling of the device
            ret, frame = self.cap.read()
            if not ret:
                break
            if not self.q.empty():
                try:
                    self.q.get_nowait()  # discard previous (unprocessed) frame
                except queue.Empty:
                    pass
            self.q.put(frame)

    def read(self):
        """Return the newest frame, waiting up to 5 s before raising
        queue.Empty."""
        return self.q.get(timeout=5)

    def close(self):
        """Stop the reader thread and close the underlying capture."""
        self.enable = False
        self.t.join()
        self.cap.close()


if __name__ == "__main__":
    import cv2
    # Fix: import from the sibling module added by this same change set,
    # not the stale `franka_env.envs.capture` package path.
    from rs_capture import RSCapture

    side = RSCapture(name="side", serial_number="112222070712")
    cap = VideoCapture(side)
    while True:
        cv2.imshow("", cap.read())
        cv2.waitKey(1)
""" from spacemouse import SpaceMouse diff --git a/serl_examples/ddpg_with_task_reward/pcb_insertion_ddpg.py b/serl_examples/ddpg_with_task_reward/pcb_insertion_ddpg.py index 6f45373..05e82ed 100644 --- a/serl_examples/ddpg_with_task_reward/pcb_insertion_ddpg.py +++ b/serl_examples/ddpg_with_task_reward/pcb_insertion_ddpg.py @@ -10,7 +10,7 @@ from serl.data import ReplayBuffer, MemoryEfficientReplayBuffer from serl.evaluation import evaluate from serl.wrappers import wrap_gym -from franka.env_franka.franka_env.envs.wrappers import SpacemouseIntervention, TwoCameraFrankaWrapper, InsertionWrapper +from robot_infra.env.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper from serl.wrappers.wandb_video import WANDBVideo from serl.wrappers.frame_stack import FrameStack from serl.utils.commons import restore_checkpoint_ diff --git a/serl_examples/drq_rlpd_with_classifier_reward/cable_route_franka_rlpd_classifier_reward_multithread.py b/serl_examples/drq_rlpd_with_classifier_reward/cable_route_franka_rlpd_classifier_reward_multithread.py index 41bf3ed..f21352e 100644 --- a/serl_examples/drq_rlpd_with_classifier_reward/cable_route_franka_rlpd_classifier_reward_multithread.py +++ b/serl_examples/drq_rlpd_with_classifier_reward/cable_route_franka_rlpd_classifier_reward_multithread.py @@ -11,7 +11,7 @@ from serl.evaluation import evaluate from serl.wrappers import wrap_gym from serl.utils.commons import get_data, restore_checkpoint_ -from franka.env_franka.franka_env.envs.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper +from robot_infra.env.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper from serl.wrappers.wandb_video import WANDBVideo from serl.wrappers.frame_stack import FrameStack import os diff --git a/serl_examples/drq_rlpd_with_vice_reward/binpick_franka_fwbw_vice.py 
b/serl_examples/drq_rlpd_with_vice_reward/binpick_franka_fwbw_vice.py index c77b0af..1343c9e 100644 --- a/serl_examples/drq_rlpd_with_vice_reward/binpick_franka_fwbw_vice.py +++ b/serl_examples/drq_rlpd_with_vice_reward/binpick_franka_fwbw_vice.py @@ -13,7 +13,7 @@ from serl.wrappers.wandb_video import WANDBVideo from serl.wrappers.frame_stack import FrameStack from serl.utils.commons import get_data, restore_checkpoint_, ema -from franka.env_franka.franka_env.envs.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, ResetFreeWrapper, RemoveGripperStateWrapper +from robot_infra.env.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper import os import threading from queue import Queue diff --git a/serl_examples/hybrid_pixel_bc/pixel_bc.py b/serl_examples/hybrid_pixel_bc/pixel_bc.py index 948b525..6dee51b 100644 --- a/serl_examples/hybrid_pixel_bc/pixel_bc.py +++ b/serl_examples/hybrid_pixel_bc/pixel_bc.py @@ -10,7 +10,7 @@ from serl.data import ReplayBuffer, MemoryEfficientReplayBuffer from serl.evaluation import evaluate from serl.wrappers import wrap_gym -from franka.env_franka.franka_env.envs.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper +from robot_infra.env.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper from serl.wrappers.wandb_video import WANDBVideo from serl.wrappers.frame_stack import FrameStack from serl.utils.commons import restore_checkpoint_