
Commit

added environment and camera capture code
charlesxu0124 committed Nov 23, 2023
1 parent ccbcf95 commit ab5ffd5
Showing 10 changed files with 223 additions and 12 deletions.
2 changes: 1 addition & 1 deletion robot_infra/README.md
@@ -19,4 +19,4 @@ To start using the robot, first power on the robot (small switch on the back of

From there you should be able to navigate to `robot_infra` and then simply run `python franka_server.py`. This should start the impedance controller and the HTTP server. You can test that things are running by trying to move the end effector around; if the impedance controller is running, it should be compliant.

Lastly, any code you write can interact with the robot via the gym interface defined in this repo under `env`. Simply run `pip install -e .` in the `env_franka` directory, and in your code simply initialize the env via `gym.make("Franka-{ENVIRONMENT NAME}-v0)`.
Lastly, any code you write can interact with the robot via the gym interface defined in this repo under `env`. Simply run `pip install -e .` in the `robot_infra` directory, and in your code initialize the env via `gym.make("Franka-{ENVIRONMENT NAME}-v0")`.
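For example, a minimal sketch of constructing and stepping the env. The environment name is kept as a placeholder to be replaced with one registered in this repo, and the `import franka_env` line is an assumption about which package registers the envs:

import gym
import franka_env  # assumed: importing this package registers the Franka-* environments

env = gym.make("Franka-{ENVIRONMENT NAME}-v0")  # placeholder id; substitute a registered environment name
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
env.close()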
Empty file added robot_infra/camera/__init__.py
Empty file.
54 changes: 54 additions & 0 deletions robot_infra/camera/capture.py
@@ -0,0 +1,54 @@
import queue
import threading
import time
# bufferless (Video) Capture
class Capture:

def __init__(self, name):
print("starting video stream", name)
self.name = name

self.q = queue.Queue()
t = threading.Thread(target=self._reader)
t.daemon = True
t.start()

# read frames as soon as they are available, keeping only most recent one
def _reader(self):
while True:
ret, frame = self.read_frame()
if not ret:
break
if not self.q.empty():
try:
self.q.get_nowait() # discard previous (unprocessed) frame
except queue.Empty:
pass
self.q.put(frame)

def read_frame(self):
raise NotImplementedError

def read_recent(self):
return self.q.get()

class MultiCapture:
def __init__(self):
self.streams = {}

def add_stream(self, stream: Capture):
name = stream.name
if name in self.streams:
return False
self.streams[name] = stream
return True

def read_recent(self, name):
return self.streams[name].read_recent()

def read_all(self):
frames = dict()
for name in self.streams.keys():
frames[name] = self.read_recent(name)
return frames
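A minimal usage sketch for these classes, assuming it is run from `robot_infra/camera`. The `DummyCapture` subclass is hypothetical, purely for illustration; a real subclass would override `read_frame()` with an actual camera read:

import numpy as np
from capture import Capture, MultiCapture

class DummyCapture(Capture):
    """Hypothetical toy subclass: always returns a constant black frame."""
    def read_frame(self):
        return True, np.zeros((480, 640, 3), dtype=np.uint8)

streams = MultiCapture()
streams.add_stream(DummyCapture("dummy"))  # background reader thread starts immediately
frames = streams.read_all()                # {"dummy": most recent frame}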
99 changes: 99 additions & 0 deletions robot_infra/camera/rs_capture.py
@@ -0,0 +1,99 @@
import numpy as np
import pyrealsense2 as rs # Intel RealSense cross-platform open-source API
import cv2
from PIL import Image
from capture import Capture

class RSCapture(Capture):
def get_device_serial_numbers(self):
devices = rs.context().devices
return [d.get_info(rs.camera_info.serial_number) for d in devices]

def __init__(self, name, serial_number, dim = (640, 480), fps = 15, depth=False):
self.name = name
assert serial_number in self.get_device_serial_numbers()
self.serial_number = serial_number
self.depth = depth
self.pipe = rs.pipeline()
self.cfg = rs.config()
self.cfg.enable_device(self.serial_number)
self.cfg.enable_stream(rs.stream.color, dim[0], dim[1], rs.format.bgr8, fps)
if self.depth:
self.cfg.enable_stream(rs.stream.depth, dim[0], dim[1], rs.format.z16, fps)
self.profile = self.pipe.start(self.cfg)

if self.depth:
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = self.profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()

# Create an align object
        # rs.align allows us to perform alignment of depth frames to other frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
self.align = rs.align(align_to)

def read(self):
frames = self.pipe.wait_for_frames()
aligned_frames = self.align.process(frames)
color_frame = aligned_frames.get_color_frame()
if self.depth:
depth_frame = aligned_frames.get_depth_frame()

if color_frame.is_video_frame():
image = np.asarray(color_frame.get_data())
if self.depth and depth_frame.is_depth_frame():
depth = np.expand_dims(np.asarray(depth_frame.get_data()), axis=2)
return True, np.concatenate((image, depth), axis=-1)
else:
return True, image
else:
return False, None

def close(self):
self.pipe.stop()
self.cfg.disable_all_streams()

if __name__ == "__main__":
cap = RSCapture(name='side_1', serial_number='128422270679', dim=(640, 480), depth=True)
cap2 = RSCapture(name='side_2', serial_number='127122270146', dim=(640, 480), depth=True)
cap3 = RSCapture(name='wrist_1', serial_number='127122270350', dim=(640, 480), depth=True)
cap4 = RSCapture(name='wrist_2', serial_number='128422271851', dim=(640, 480), depth=True)

    while True:
        # Read from each camera in turn; each frame is color (3 channels) plus depth (1 channel) stacked on the last axis.
        for cam in (cap, cap2, cap3, cap4):
            success, img = cam.read()
            if not success:
                continue
            img = cv2.resize(img, (480, 640)[::-1])
            color_img = img[..., :3]
            depth_img = img[..., 3]
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_img, alpha=0.03), cv2.COLORMAP_JET)
            img = np.hstack((color_img, depth_colormap)) / 255.
            cv2.imshow(cam.name, img)
            cv2.waitKey(1)
65 changes: 65 additions & 0 deletions robot_infra/camera/video_capture.py
@@ -0,0 +1,65 @@
import sys
import numpy as np
import cv2
import queue
import threading
import time

class VideoCapture:

def __init__(self, cap, name=None):
        if name is None:
            name = cap.name
self.name = name
self.q = queue.Queue()
self.cap = cap
self.t = threading.Thread(target=self._reader)
self.t.daemon = True
self.enable = True
self.t.start()

# read frames as soon as they are available, keeping only most recent one
def _reader(self):
while self.enable:
time.sleep(0.01)
# cap.set(cv2.CAP_PROP_EXPOSURE, -10)
ret, frame = self.cap.read()
# cv2.imshow("frame", frame)
# key = cv2.waitKey(1)
if not ret:
break
if not self.q.empty():
try:
self.q.get_nowait() # discard previous (unprocessed) frame
except queue.Empty:
pass
self.q.put(frame)

def read(self):
return self.q.get(timeout=5)

def close(self):
self.enable = False
self.t.join()
self.cap.close()


if __name__ == "__main__":
    from rs_capture import RSCapture  # sibling module added in this commit
side = RSCapture(name='side', serial_number='112222070712')
cap = VideoCapture(side)
while True:
frame = cap.read()
cv2.imshow('', frame)
cv2.waitKey(1)
7 changes: 0 additions & 7 deletions robot_infra/spacemouse/spacemouse_teleop.py
@@ -1,11 +1,4 @@
"""Uses a spacemouse as action input into the environment.
To use this, first clone robosuite (git@github.com:anair13/robosuite.git),
add it to the python path, and ensure you can run the following file (and
see input values from the spacemouse):
robosuite/devices/spacemouse.py
You will likely have to `pip install hidapi` and Spacemouse drivers.
"""
from spacemouse import SpaceMouse
2 changes: 1 addition & 1 deletion serl_examples/ddpg_with_task_reward/pcb_insertion_ddpg.py
@@ -10,7 +10,7 @@
from serl.data import ReplayBuffer, MemoryEfficientReplayBuffer
from serl.evaluation import evaluate
from serl.wrappers import wrap_gym
from franka.env_franka.franka_env.envs.wrappers import SpacemouseIntervention, TwoCameraFrankaWrapper, InsertionWrapper
from robot_infra.env.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper
from serl.wrappers.wandb_video import WANDBVideo
from serl.wrappers.frame_stack import FrameStack
from serl.utils.commons import restore_checkpoint_
@@ -11,7 +11,7 @@
from serl.evaluation import evaluate
from serl.wrappers import wrap_gym
from serl.utils.commons import get_data, restore_checkpoint_
from franka.env_franka.franka_env.envs.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper
from robot_infra.env.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper
from serl.wrappers.wandb_video import WANDBVideo
from serl.wrappers.frame_stack import FrameStack
import os
@@ -13,7 +13,7 @@
from serl.wrappers.wandb_video import WANDBVideo
from serl.wrappers.frame_stack import FrameStack
from serl.utils.commons import get_data, restore_checkpoint_, ema
from franka.env_franka.franka_env.envs.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, ResetFreeWrapper, RemoveGripperStateWrapper
from robot_infra.env.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper
import os
import threading
from queue import Queue
2 changes: 1 addition & 1 deletion serl_examples/hybrid_pixel_bc/pixel_bc.py
@@ -10,7 +10,7 @@
from serl.data import ReplayBuffer, MemoryEfficientReplayBuffer
from serl.evaluation import evaluate
from serl.wrappers import wrap_gym
from franka.env_franka.franka_env.envs.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper
from robot_infra.env.wrappers import GripperCloseEnv, SpacemouseIntervention, TwoCameraFrankaWrapper, FourDoFWrapper, ResetFreeWrapper
from serl.wrappers.wandb_video import WANDBVideo
from serl.wrappers.frame_stack import FrameStack
from serl.utils.commons import restore_checkpoint_
