diff --git a/donkeycar/parts/oak_d_camera.py b/donkeycar/parts/oak_d_camera.py
index c4d5dc336..e019b7b57 100644
--- a/donkeycar/parts/oak_d_camera.py
+++ b/donkeycar/parts/oak_d_camera.py
@@ -115,6 +115,9 @@ def with_pixel_crop_height(self, pixel_crop_height):
         self.pixel_crop_height = pixel_crop_height
         return self
 
+    def with_device_id(self, device_id):
+        self.device_id = device_id
+        return self
 
     def build(self):
         return OakDCamera(
@@ -137,7 +140,8 @@ def build(self):
             rgb_wb_manual=self.rgb_wb_manual,
             use_camera_tuning_blob=self.use_camera_tuning_blob,
             enable_undistort_rgb=self.enable_undistort_rgb,
-            pixel_crop_height=self.pixel_crop_height
+            pixel_crop_height=self.pixel_crop_height,
+            mxid=self.device_id
         )
 
 class OakDCamera:
@@ -161,7 +165,8 @@ def __init__(self,
                  rgb_wb_manual= 2800,
                  use_camera_tuning_blob = False,
                  enable_undistort_rgb = False,
-                 pixel_crop_height = 35):
+                 pixel_crop_height = 35,
+                 mxid = None):
 
         self.width = width
         self.height = height
@@ -202,6 +207,11 @@ def __init__(self,
 
         self.alpha = 0
 
+        if mxid:
+            self.device_info = dai.DeviceInfo(mxid) # MXID
+        else:
+            self.device_info = dai.DeviceInfo()
+
         # Create pipeline
         self.pipeline = dai.Pipeline()
         # self.pipeline.setCameraTuningBlobPath('/tuning_color_ov9782_wide_fov.bin')
@@ -222,8 +232,8 @@ def __init__(self,
             self.create_obstacle_dist_pipeline()
 
         try:
            # Connect to device and start pipeline
-            logger.info('Starting OAK-D camera')
-            self.device = dai.Device(self.pipeline)
+            logger.info(f'Starting OAK-D camera id {mxid}')
+            self.device = dai.Device(self.pipeline, self.device_info)
 
            calibData = self.device.readCalibration2()
            rgbCamSocket = dai.CameraBoardSocket.CAM_A
@@ -546,4 +556,4 @@ def shutdown(self):
         self.device = None
         self.queue = None
         self.pipeline = None
-        
\ No newline at end of file
+        
diff --git a/donkeycar/templates/cfg_complete.py b/donkeycar/templates/cfg_complete.py
index 8826ba71e..e5d8b5167 100644
--- a/donkeycar/templates/cfg_complete.py
+++ b/donkeycar/templates/cfg_complete.py
@@ -40,6 +40,9 @@
 # OAK-D-LITE CAMERA SETTINGS
 CAMERA_TYPE = "OAK" # (OAK|PICAM|WEBCAM|CVCAM|CSIC|V4L|D435|MOCK|IMAGE_LIST)
 
+# OAK REAR CAMERA SETTINGS
+CAMERA_REAR_TYPE = None # (None|OAK_REAR)
+
 # OAK-D-LITE: "1080p" for rgb
 # OAK-D-WIDE: "800p" for rgb
 RGB_RESOLUTION = "800p"
@@ -54,6 +57,9 @@
 RGB_SENSOR_ISO = 400
 RGB_WB_MANUAL = 2800
 
+OAK_ID = None
+OAK_REAR_ID = None
+
 # OAK-D-LITE: from 1920/1080 (1,8)>>240/135
 # OAK-D-WIDE: from 1280/800 (1,8)>>160/100 (3,16)>>240/150 5/32>>200/125
 OAK_D_ISP_SCALE = (1,4) # (1,2)
@@ -112,6 +118,14 @@
 BEHAVIOR_DROP_UPPER_LAYERS = 0.2
 BEHAVIOR_L1_CHANNELS = 24
 
+# REAR CAMERA SETTINGS
+OBSTACLE_DETECTOR_REAR_VIEW_ENABLED = False
+OBSTACLE_DETECTOR_REAR_MODEL_TYPE = "trt_obstacle_detector"
+OBSTACLE_DETECTOR_REAR_MODEL_PATH = "~/donkeycar-models/20240112161500-20240114222912-LR-A001-avoidance-rgb-cone-car.trt"
+OAK_REAR_ENABLE_DEPTH_MAP = False
+OAK_REAR_ENABLE_UNDISTORTED_RGB = False
+OAK_REAR_OBSTACLE_DETECTION_ENABLED = False
+
 #CAMERA Settings Vivatech 2022 (nano)
 #CAMERA_TYPE = "CSIC" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|D435|MOCK|IMAGE_LIST)
 #IMAGE_W = 160
diff --git a/donkeycar/templates/complete.py b/donkeycar/templates/complete.py
index d807d3eb5..a3950d5e5 100644
--- a/donkeycar/templates/complete.py
+++ b/donkeycar/templates/complete.py
@@ -104,6 +104,11 @@ def drive(cfg, model_path=None, use_joystick=False, model_type=None,
     #
     add_camera(V, cfg, camera_type)
 
+    #
+    # setup rear camera
+    #
+    add_rear_camera(V, cfg)
+
     # add lidar
     if cfg.USE_LIDAR:
@@ -367,6 +372,7 @@ def reload_weights(filename):
     #
     if cfg.OBSTACLE_DETECTOR_ENABLED:
         detector_outputs = ['detector/obstacle_lane']
+        fused_detector_outputs = detector_outputs
         if not cfg.OBSTACLE_DETECTOR_MANUAL_LANE:
             kd = dk.utils.get_model_by_type(cfg.OBSTACLE_DETECTOR_MODEL_TYPE, cfg)
             load_model(kd, cfg.OBSTACLE_DETECTOR_MODEL_PATH)
@@ -376,11 +382,23 @@ def reload_weights(filename):
                 inputs = ['cam/image_array']
             V.add(kd, inputs=inputs, outputs=detector_outputs, run_condition='run_pilot')
 
+        # Add rear view camera
+        if cfg.OBSTACLE_DETECTOR_REAR_VIEW_ENABLED:
+            rear_detector_outputs = ['detector_rear/obstacle_lane']
+            kd_rear = dk.utils.get_model_by_type(cfg.OBSTACLE_DETECTOR_REAR_MODEL_TYPE, cfg)
+            load_model(kd_rear, cfg.OBSTACLE_DETECTOR_REAR_MODEL_PATH)
+            if cfg.OAK_REAR_ENABLE_DEPTH_MAP:
+                inputs = ['cam_rear/image_array', 'cam_rear/depth_array']
+            else:
+                inputs = ['cam_rear/image_array']
+            V.add(kd_rear, inputs=inputs, outputs=rear_detector_outputs, run_condition='run_pilot')
+            fused_detector_outputs = detector_outputs + rear_detector_outputs
+
         # Avoidance logic between obstacle_detector and KerasBehavioral
         from donkeycar.parts.avoidance_behavior import AvoidanceBehaviorPart
         abh = AvoidanceBehaviorPart(cfg)
         #V.add(abh, inputs=['detector/obstacle_lane'], outputs=['behavior/one_hot_state_array'], run_condition='run_pilot')
-        V.add(abh, inputs=detector_outputs, outputs=['behavior/one_hot_state_array'], run_condition='run_pilot')
+        V.add(abh, inputs=fused_detector_outputs, outputs=['behavior/one_hot_state_array'], run_condition='run_pilot')
 
         inputs = ["cam/image_array", "behavior/one_hot_state_array"]
@@ -590,13 +608,26 @@ def run(self, mode, recording):
                 types +=['float', 'float', 'float', 'float', 'float', 'float']
 
-    if cfg.CAMERA_TYPE == "OAK" and cfg.OAK_ENABLE_DEPTH_MAP:
-        inputs += ['cam/depth_array']
-        types += ['gray16_array']
+    if cfg.CAMERA_TYPE == "OAK":
+        if cfg.OAK_ENABLE_DEPTH_MAP:
+            inputs += ['cam/depth_array']
+            types += ['gray16_array']
+
+        if cfg.OAK_ENABLE_UNDISTORTED_RGB:
+            inputs += ['cam/undistorted_rgb']
+            types += ['undistorted_image']
+
+    if cfg.CAMERA_REAR_TYPE == "OAK_REAR":
+        if cfg.OBSTACLE_DETECTOR_REAR_VIEW_ENABLED:
+            inputs += ['cam_rear/image_array']
+            types += ['image_array']
+
+        if cfg.OAK_REAR_ENABLE_DEPTH_MAP:
+            inputs += ['cam_rear/depth_array']
+            types += ['gray16_array']
+
+
-    if cfg.CAMERA_TYPE == "OAK" and cfg.OAK_ENABLE_UNDISTORTED_RGB:
-        inputs += ['cam/undistorted_rgb']
-        types += ['undistorted_image']
 
     # print(f"in complete cfg.OAK_ENABLE_UNDISTORTED_RGB: {cfg.OAK_ENABLE_UNDISTORTED_RGB}")
     # if cfg.CAMERA_TYPE == "OAK" and cfg.OAK_OBSTACLE_DETECTION_ENABLED:
     #     inputs += ['cam/obstacle_distances']
@@ -846,6 +877,45 @@ def get_camera(cfg):
         raise(Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))
     return cam
 
+def add_rear_camera(V, cfg):
+
+    logger.info("cfg.CAMERA_REAR_TYPE %s"%cfg.CAMERA_REAR_TYPE)
+
+    if cfg.CAMERA_REAR_TYPE == "OAK_REAR":
+        from donkeycar.parts.oak_d_camera import OakDCameraBuilder
+        # cam = OakDCamera(width=cfg.IMAGE_W, height=cfg.IMAGE_H, depth=cfg.IMAGE_DEPTH, isp_scale=cfg.OAK_D_ISP_SCALE, framerate=cfg.CAMERA_FRAMERATE, enable_depth=cfg.OAK_ENABLE_DEPTH_MAP, enable_obstacle_dist=cfg.OAK_OBSTACLE_DETECTION_ENABLED, rgb_resolution=cfg.RGB_RESOLUTION)
+        cam_rear = OakDCameraBuilder() \
+            .with_width(cfg.OAK_IMAGE_W) \
+            .with_height(cfg.OAK_IMAGE_H) \
+            .with_depth(cfg.OAK_IMAGE_DEPTH) \
+            .with_isp_scale(cfg.OAK_D_ISP_SCALE) \
+            .with_framerate(cfg.CAMERA_FRAMERATE) \
+            .with_depth_crop_rect(cfg.OAK_DEPTH_CROP_RECT) \
+            .with_enable_depth(cfg.OAK_REAR_ENABLE_DEPTH_MAP) \
+            .with_enable_obstacle_dist(cfg.OAK_OBSTACLE_DETECTION_ENABLED) \
+            .with_rgb_resolution(cfg.RGB_RESOLUTION) \
+            .with_rgb_apply_cropping(cfg.RGB_APPLY_CROPPING) \
+            .with_rgb_sensor_crop_x(cfg.RGB_SENSOR_CROP_X) \
+            .with_rgb_sensor_crop_y(cfg.RGB_SENSOR_CROP_Y) \
+            .with_rgb_video_size(cfg.RGB_VIDEO_SIZE) \
+            .with_rgb_apply_manual_conf(cfg.RGB_APPLY_MANUAL_CONF) \
+            .with_rgb_exposure_time(cfg.RGB_EXPOSURE_TIME) \
+            .with_rgb_sensor_iso(cfg.RGB_SENSOR_ISO) \
+            .with_rgb_wb_manual(cfg.RGB_WB_MANUAL) \
+            .with_use_camera_tuning_blob(cfg.USE_CAMERA_TUNING_BLOB) \
+            .with_enable_undistort_rgb(cfg.OAK_ENABLE_UNDISTORTED_RGB) \
+            .with_pixel_crop_height(cfg.OAK_PIXEL_CROP_HEIGHT) \
+            .with_device_id(cfg.OAK_REAR_ID) \
+            .build()
+        outputs = ['cam_rear/image_array']
+        if cfg.OAK_REAR_ENABLE_DEPTH_MAP:
+            outputs += ['cam_rear/depth_array']
+        if cfg.OAK_REAR_ENABLE_UNDISTORTED_RGB:
+            outputs += ['cam_rear/undistorted_rgb']
+        if cfg.OAK_OBSTACLE_DETECTION_ENABLED:
+            outputs += ['cam_rear/obstacle_distances']
+        V.add(cam_rear, inputs=[], outputs=outputs, threaded=True)
+
 
 def add_camera(V, cfg, camera_type):
     """
@@ -914,6 +984,7 @@ def add_camera(V, cfg, camera_type):
                     .with_use_camera_tuning_blob(cfg.USE_CAMERA_TUNING_BLOB) \
                     .with_enable_undistort_rgb(cfg.OAK_ENABLE_UNDISTORTED_RGB) \
                     .with_pixel_crop_height(cfg.OAK_PIXEL_CROP_HEIGHT) \
+                    .with_device_id(cfg.OAK_ID) \
                     .build()
         outputs = ['cam/image_array']
         if cfg.OAK_ENABLE_DEPTH_MAP:
diff --git a/scripts/listOAKCamera.py b/scripts/listOAKCamera.py
new file mode 100644
index 000000000..12bfe17d2
--- /dev/null
+++ b/scripts/listOAKCamera.py
@@ -0,0 +1,27 @@
+import depthai as dai
+from typing import List
+
+print('Searching for all available devices...\n')
+# Query all available devices (USB and POE OAK cameras)
+infos: List[dai.DeviceInfo] = dai.DeviceBootloader.getAllAvailableDevices()
+
+if len(infos) == 0:
+    print("Couldn't find any available devices.")
+    exit(-1)
+
+
+for info in infos:
+    # Converts enum eg. 'XLinkDeviceState.X_LINK_UNBOOTED' to 'UNBOOTED'
+    state = str(info.state).split('X_LINK_')[1]
+
+    print(f"Found device '{info.name}', MxId: '{info.mxid}', State: '{state}'")
+
+
+# Connect to a specific device. We will just take the first one
+print(f"\nBooting the first available camera ({infos[0].name})...")
+with dai.Device(dai.Pipeline(), infos[0], usb2Mode=False) as device:
+    print("Available camera sensors: ", device.getCameraSensorNames())
+    calib = device.readCalibration()
+    eeprom = calib.getEepromData()
+    print(f"Product name: {eeprom.productName}, board name {eeprom.boardName}")
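Usage sketch: with these changes a front and a rear OAK camera can each be pinned to a specific device by MXID in myconfig.py, using the MXIDs printed by scripts/listOAKCamera.py. The MXID strings and the rear model filename below are placeholders, not values taken from this change:

    # myconfig.py -- illustrative values only
    CAMERA_TYPE = "OAK"
    CAMERA_REAR_TYPE = "OAK_REAR"                # enable the rear OAK camera part
    OAK_ID = "14442C10D13EABCE00"                # placeholder MXID of the front camera (from scripts/listOAKCamera.py)
    OAK_REAR_ID = "14442C10D13EABCF00"           # placeholder MXID of the rear camera (from scripts/listOAKCamera.py)
    OBSTACLE_DETECTOR_REAR_VIEW_ENABLED = True   # run the rear obstacle detector
    OBSTACLE_DETECTOR_REAR_MODEL_PATH = "~/donkeycar-models/my-rear-obstacle-model.trt"  # placeholder path

Leaving OAK_ID and OAK_REAR_ID at None builds a default dai.DeviceInfo(), which should keep the previous behaviour of connecting to the first available device.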