diff --git a/.gitignore b/.gitignore
index e0c8b5f17..5af1acf43 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,7 +30,7 @@ share/python-wheels/
 MANIFEST
 *.idea
 # DepthAI-Specific
-dataset/
+dataset*
 .fw_cache/
 depthai.calib
 mesh_left.calib
diff --git a/calibrate.py b/calibrate.py
index 113dbd1fb..1f31f9355 100755
--- a/calibrate.py
+++ b/calibrate.py
@@ -1,15 +1,23 @@
 #!/usr/bin/env python3
 import argparse
 import json
 import shutil
 import traceback
 from argparse import ArgumentParser
 from pathlib import Path
 import time
+from datetime import datetime, timedelta
+from collections import deque
+from scipy.spatial.transform import Rotation
+import sys
 import cv2
 import depthai as dai
 import numpy as np
+import copy
 import depthai_helpers.calibration_utils as calibUtils
@@ -18,6 +26,168 @@
 red = (255, 0, 0)
 green = (0, 255, 0)
+if hasattr(dai.CameraBoardSocket, 'CAM_A'):
+    stringToCam = {
+        'RGB'   : dai.CameraBoardSocket.CAM_A,
+        'LEFT'  : dai.CameraBoardSocket.CAM_B,
+        'RIGHT' : dai.CameraBoardSocket.CAM_C,
+        'CAM_A' : dai.CameraBoardSocket.CAM_A,
+        'CAM_B' : dai.CameraBoardSocket.CAM_B,
+        'CAM_C' : dai.CameraBoardSocket.CAM_C,
+        'CAM_D' : dai.CameraBoardSocket.CAM_D,
+        'CAM_E' : dai.CameraBoardSocket.CAM_E,
+        'CAM_F' : dai.CameraBoardSocket.CAM_F,
+        'CAM_G' : dai.CameraBoardSocket.CAM_G,
+        'CAM_H' : dai.CameraBoardSocket.CAM_H
+    }
+    # Reverse mapping: each socket maps to its canonical CAM_x name.
+    # (Duplicate dict keys would silently overwrite earlier entries.)
+    camToString = {
+        dai.CameraBoardSocket.CAM_A : 'CAM_A',
+        dai.CameraBoardSocket.CAM_B : 'CAM_B',
+        dai.CameraBoardSocket.CAM_C : 'CAM_C',
+        dai.CameraBoardSocket.CAM_D : 'CAM_D',
+        dai.CameraBoardSocket.CAM_E : 'CAM_E',
+        dai.CameraBoardSocket.CAM_F : 'CAM_F',
+        dai.CameraBoardSocket.CAM_G : 'CAM_G',
+        dai.CameraBoardSocket.CAM_H : 'CAM_H'
+    }
+else:
+    stringToCam = {
+        'RGB': dai.CameraBoardSocket.RGB,
+        'LEFT': dai.CameraBoardSocket.LEFT,
+        'RIGHT': dai.CameraBoardSocket.RIGHT,
+        'AUTO': dai.CameraBoardSocket.AUTO,
+        'CAM_A' : dai.CameraBoardSocket.RGB,
+        'CAM_B' : dai.CameraBoardSocket.LEFT,
+        'CAM_C' : dai.CameraBoardSocket.RIGHT
+    }
+
+    camToString = {
+        dai.CameraBoardSocket.RGB   : 'CAM_A',
+        dai.CameraBoardSocket.LEFT  : 'CAM_B',
+        dai.CameraBoardSocket.RIGHT : 'CAM_C',
+    }
+
+
+camToMonoRes = {
+    'OV7251' : dai.MonoCameraProperties.SensorResolution.THE_480_P,
+    'OV9*82' : dai.MonoCameraProperties.SensorResolution.THE_800_P,
+    'OV9282' : dai.MonoCameraProperties.SensorResolution.THE_800_P,
+    }
+
+camToRgbRes = {
+    'IMX378' : dai.ColorCameraProperties.SensorResolution.THE_4_K,
+    'IMX214' : dai.ColorCameraProperties.SensorResolution.THE_4_K,
+    'OV9*82' : dai.ColorCameraProperties.SensorResolution.THE_800_P,
+    'OV9282' : dai.ColorCameraProperties.SensorResolution.THE_800_P,
+    'OV9782' : dai.ColorCameraProperties.SensorResolution.THE_800_P,
+    'IMX582' : dai.ColorCameraProperties.SensorResolution.THE_12_MP,
+    }
+
+if hasattr(dai.ColorCameraProperties.SensorResolution, 'THE_1200_P'):
+    camToRgbRes['AR0234'] = dai.ColorCameraProperties.SensorResolution.THE_1200_P
+
+BOARD_TYPES = [f.stem for f in (Path(__file__).parent / 'resources' / 'boards').glob('*.json')]
+
+# This settings definition is used to create the command line parser and to compose the settings GUI.
+# The 'label' property is displayed in the GUI. If it is missing, the setting is not displayed in the GUI.
+# The 'help' property is displayed as a tooltip in the GUI and as a help message in the command line parser.
+SETTINGS = {
+    'board': {
+        'flag': "-brd", 'flag_long': "--board", 'default': None, 'type': str, 'options': BOARD_TYPES,
+        'label': "Camera model", 'help': "BW1097, BW1098OBC - Board type from resources/boards/ (not case-sensitive). Or path to a custom .json board config. Mutually exclusive with [-fv -b -w]"
+    },
+    'count': {
+        'flag': "-c", 'flag_long': "--count", 'default': 3, 'type': int, 'min': 1, 'max': 10,
+        'label': "Images per polygon", 'help': "Number of images per polygon to capture. Default: 3."
+    },
+    'squareSizeCm': {
+        'flag': "-s", 'flag_long': "--squareSizeCm", 'default': 2.5, 'type': float, 'min': 2.2, 'step': 0.5,
+        'label': "Square size (cm)", 'help': "Square size of calibration pattern used in centimeters. Default: 2.5cm."
+    },
+    'markerSizeCm': {
+        'flag': "-ms", 'flag_long': "--markerSizeCm", 'default': None, 'type': float, 'step': 0.5,
+        'label': "Marker size (cm)", 'help': "Marker size in charuco boards."
+    },
+    'defaultBoard': {
+        'flag': "-db", 'flag_long': "--defaultBoard", 'default': False, 'type': bool, 'action': "store_true",
+        'help': "Calculates the -ms parameter automatically based on the aspect ratio of the charuco boards in the repository"
+    },
+    'squaresX': {
+        'flag': "-nx", 'flag_long': "--squaresX", 'default': 11, 'type': int, 'min': 1,
+        'label': "No. squares X", 'help': "Number of chessboard squares in X direction in charuco boards."
+    },
+    'squaresY': {
+        'flag': "-ny", 'flag_long': "--squaresY", 'default': 8, 'type': int, 'min': 1,
+        'label': "No. squares Y", 'help': "Number of chessboard squares in Y direction in charuco boards."
+    },
+    'rectifiedDisp': {
+        'flag': "-rd", 'flag_long': "--rectifiedDisp", 'default': True, 'type': bool, 'action': "store_false",
+        'label': "Show rectified lines", 'help': "Display rectified images with lines drawn for epipolar check"
+    },
+    'disableRgb': {
+        'flag': "-drgb", 'flag_long': "--disableRgb", 'default': False, 'type': bool, 'action': "store_true",
+        'label': "Disable RGB calibration", 'help': "Disable RGB camera calibration"
+    },
+    'swapLR': {
+        'flag': "-slr", 'flag_long': "--swapLR", 'default': False, 'type': bool, 'action': "store_true",
+        'label': "Swap left and right cameras", 'help': "Interchange the left and right camera ports."
+    },
+    'mode': {
+        'flag': "-m", 'flag_long': "--mode", 'default': ['capture', 'process'], 'nargs': '*', 'type': str, 'multichoice_options': ['capture', 'process'],
+        'label': "Mode", 'help': "Space-separated list of calibration options to run. By default, executes the full 'capture process' pipeline. To execute a single step, enter just that step (ex: 'process')."
+    },
+    'invert_v': {
+        'flag': "-iv", 'flag_long': "--invertVertical", 'default': False, 'type': bool, 'action': "store_true",
+        'label': "Flip vertically", 'help': "Invert vertical axis of the camera for the display"
+    },
+    'invert_h': {
+        'flag': "-ih", 'flag_long': "--invertHorizontal", 'default': False, 'type': bool, 'action': "store_true",
+        'label': "Flip horizontally", 'help': "Invert horizontal axis of the camera for the display"
+    },
+    'outputScaleFactor': {
+        'flag': "-osf", 'flag_long': "--outputScaleFactor", 'default': 0.5, 'type': float, 'min': 0.1, 'max': 2.0, 'step': 0.1,
+        'label': "Visualization scale factor", 'help': "Set the scaling factor for output visualization. Default: 0.5."
+    },
+    'maxEpiploarError': {
+        'flag': "-ep", 'flag_long': "--maxEpiploarError", 'default': "1.0", 'type': float,
+        'help': "Sets the maximum epipolar error allowed for rectification"
+    },
+    'cameraMode': {
+        'flag': "-cm", 'flag_long': "--cameraMode", 'default': "perspective", 'type': str,
+        'help': "Choose between perspective and fisheye"
+    },
+    'rgbLensPosition': {
+        'flag': "-rlp", 'flag_long': "--rgbLensPosition", 'default': 135, 'type': int, 'min': 0, 'max': 255,
+        'label': "RGB focus", 'help': "Set the manual lens position of the camera for calibration"
+    },
+    'fps': {
+        'flag': "-fps", 'flag_long': "--fps", 'default': 10, 'type': int, 'min': 1, 'max': 30,
+        'label': "Camera FPS", 'help': "Set capture FPS for all cameras. Default: 10"
+    },
+    'captureDelay': {
+        'flag': "-cd", 'flag_long': "--captureDelay", 'default': 5, 'type': int, 'min': 0,
+        'label': "Capture delay (s)", 'help': "Choose how much delay to add between pressing the key and capturing the image. Default: 5"
+    },
+    'debug': {
+        'flag': "-d", 'flag_long': "--debug", 'default': False, 'type': bool, 'action': "store_true",
+        'help': "Enable debug logs."
+    },
+    'factoryCalibration': {
+        'flag': "-fac", 'flag_long': "--factoryCalibration", 'default': False, 'type': bool, 'action': "store_true",
+        'help': "Enable writing to Factory Calibration."
+    },
+    'noGui': {
+        'flag': "-ng", 'flag_long': "--noGui", 'default': False, 'type': bool, 'action': "store_true",
+        'help': "Disable GUI."
+    }
+}

 def create_blank(width, height, rgb_color=(0, 0, 0)):
     """Create new image(numpy array) filled with certain color in RGB"""
@@ -60,60 +230,121 @@ def parse_args():
     '''
     parser = ArgumentParser(
         epilog=epilog_text, formatter_class=argparse.RawDescriptionHelpFormatter)
-    parser.add_argument("-c", "--count", default=1, type=int, required=False,
-                        help="Number of images per polygon to capture. Default: 1.")
-    parser.add_argument("-s", "--squareSizeCm", type=float, required=True,
-                        help="Square size of calibration pattern used in centimeters. Default: 2.0cm.")
-    parser.add_argument("-ms", "--markerSizeCm", type=float, required=False,
-                        help="Marker size in charuco boards.")
-    parser.add_argument("-db", "--defaultBoard", default=False, action="store_true",
-                        help="Calculates the -ms parameter automatically based on aspect ratio of charuco board in the repository")
-    parser.add_argument("-nx", "--squaresX", default="11", type=int, required=False,
-                        help="number of chessboard squares in X direction in charuco boards.")
-    parser.add_argument("-ny", "--squaresY", default="8", type=int, required=False,
-                        help="number of chessboard squares in Y direction in charuco boards.")
-    parser.add_argument("-rd", "--rectifiedDisp", default=True, action="store_false",
-                        help="Display rectified images with lines drawn for epipolar check")
-    parser.add_argument("-drgb", "--disableRgb", default=False, action="store_true",
-                        help="Disable rgb camera Calibration")
-    parser.add_argument("-slr", "--swapLR", default=False, action="store_true",
-                        help="Interchange Left and right camera port.")
-    parser.add_argument("-m", "--mode", default=['capture', 'process'], nargs='*', type=str, required=False,
-                        help="Space-separated list of calibration options to run. By default, executes the full 'capture process' pipeline. To execute a single step, enter just that step (ex: 'process').")
-    parser.add_argument("-brd", "--board", default=None, type=str, required=True,
-                        help="BW1097, BW1098OBC - Board type from resources/boards/ (not case-sensitive). "
-                        "Or path to a custom .json board config. Mutually exclusive with [-fv -b -w]")
-    parser.add_argument("-iv", "--invertVertical", dest="invert_v", default=False, action="store_true",
-                        help="Invert vertical axis of the camera for the display")
-    parser.add_argument("-ih", "--invertHorizontal", dest="invert_h", default=False, action="store_true",
-                        help="Invert horizontal axis of the camera for the display")
-    parser.add_argument("-ep", "--maxEpiploarError", default="1.0", type=float, required=False,
-                        help="Sets the maximum epiploar allowed with rectification")
-    parser.add_argument("-cm", "--cameraMode", default="perspective", type=str,
-                        required=False, help="Choose between perspective and Fisheye")
-    parser.add_argument("-rlp", "--rgbLensPosition", default=135, type=int,
-                        required=False, help="Set the manual lens position of the camera for calibration")
-    parser.add_argument("-fps", "--fps", default=10, type=int,
-                        required=False, help="Set capture FPS for all cameras. Default: %(default)s")
-    parser.add_argument("-cd", "--captureDelay", default=5, type=int,
-                        required=False, help="Choose how much delay to add between pressing the key and capturing the image. Default: %(default)s")
-    parser.add_argument("-d", "--debug", default=False, action="store_true", help="Enable debug logs.")
-    parser.add_argument("-fac", "--factoryCalibration", default=False, action="store_true",
-                        help="Enable writing to Factory Calibration.")
+    for name, properties in SETTINGS.items():
+        if properties['type'] == bool:
+            parser.add_argument(properties['flag'], properties['flag_long'], default=properties['default'], action=properties['action'], dest=name, help=properties['help'])
+        elif properties['type'] == str:
+            parser.add_argument(properties['flag'], properties['flag_long'], default=properties['default'], nargs=properties.get('nargs'), dest=name, type=properties['type'], help=properties['help'])
+        else:
+            parser.add_argument(properties['flag'], properties['flag_long'], default=properties['default'], dest=name, type=properties['type'], help=properties['help'])
+    options = parser.parse_args()
+    print(options)
+    # Set some extra defaults; `-brd` would override them
     if options.markerSizeCm is None:
-        if options.defaultBoard:
-            options.markerSizeCm = options.squareSizeCm * 0.75
-        else:
-            raise argparse.ArgumentError(options.markerSizeCm, "-ms / --markerSizeCm needs to be provided (you can use -db / --defaultBoard if using calibration board from this repository or calib.io to calculate -ms automatically)")
+        options.markerSizeCm = options.squareSizeCm * 0.75
     if options.squareSizeCm < 2.2:
         raise argparse.ArgumentTypeError("-s / --squareSizeCm needs to be greater than 2.2 cm")
     return options

+class HostSync:
+    def __init__(self, deltaMilliSec):
+        self.arrays = {}
+        self.arraySize = 15
+        self.recentFrameTs = None
+        self.deltaMilliSec = timedelta(milliseconds=deltaMilliSec)
+        # self.synced = queue.Queue()
+
+    def remove(self, t1):
+        return timedelta(milliseconds=500) < (self.recentFrameTs - t1)
+
+    def add_msg(self, name, data, ts):
+        if name not in self.arrays:
+            self.arrays[name] = deque(maxlen=self.arraySize)
+        # Add msg to array
+        self.arrays[name].appendleft({'data': data, 'timestamp': ts})
+        # Track the newest timestamp seen so far (the previous truthiness test
+        # `self.recentFrameTs - ts` updated on any non-equal timestamp).
+        if self.recentFrameTs is None or ts > self.recentFrameTs:
+            self.recentFrameTs = ts
+        # for name, arr in self.arrays.items():
+        #     for i, obj in enumerate(arr):
+        #         if self.remove(obj['timestamp']):
+        #             arr.remove(obj)
+        #         else: break
+
def clearQueues(self):
+        print('Clearing queues...')
+        for name, msgList in self.arrays.items():
+            self.arrays[name].clear()
+            print(len(self.arrays[name]))
+
+    def get_synced(self):
+        synced = {}
+        # Every stream must have a full queue before matching is attempted
+        for name, msgList in self.arrays.items():
+            if len(msgList) != self.arraySize:
+                return False
+
+        for name, pivotMsgList in self.arrays.items():
+            print('len(pivotMsgList)')
+            print(len(pivotMsgList))
+            # Work on a copy: popleft() on the original deque would drain it
+            pivotMsgListDuplicate = pivotMsgList.copy()
+            while pivotMsgListDuplicate:
+                currPivot = pivotMsgListDuplicate.popleft()
+                synced[name] = currPivot['data']
+
+                for subName, msgList in self.arrays.items():
+                    print(f'len of {subName}')
+                    print(len(msgList))
+                    if name == subName:
+                        continue
+                    msgListDuplicate = msgList.copy()
+                    while msgListDuplicate:
+                        print(f'---len of dup {subName} is {len(msgListDuplicate)}')
+                        currMsg = msgListDuplicate.popleft()
+                        time_diff = abs(currMsg['timestamp'] - currPivot['timestamp'])
+                        print(f'---Time diff is {time_diff} and delta is {self.deltaMilliSec}')
+                        if time_diff < self.deltaMilliSec:
+                            print(f'--------Adding {subName} to sync. Messages left is {len(msgListDuplicate)}')
+                            synced[subName] = currMsg['data']
+                            break
+                print(f'Size of synced is {len(synced)} and array size is {len(self.arrays)}')
+                if len(synced) == len(self.arrays):
+                    self.clearQueues()
+                    return synced
+
+        self.clearQueues()
+        return False
+
+
+        """ for name, arr in self.arrays.items():
+            for i, obj in enumerate(arr):
+                time_diff = abs(obj['timestamp'] - self.recentFrameTs)
+                print("Time diff for {0} is {1} milliseconds".format(name, time_diff.total_seconds() * 1000))
+                # 20ms since we add rgb/depth frames at 30FPS => 33ms. If
+                # time difference is below 20ms, it's considered as synced
+                if time_diff < self.deltaMilliSec:
+                    synced[name] = obj['data']
+                    # print(f"{name}: {i}/{len(arr)}")
+                    break
+        print(f'Size of synced is {len(synced)} and array size is {len(self.arrays)}')
+        if len(synced) == len(self.arrays):
+            for name, arr in self.arrays.items():
+                for i, obj in enumerate(arr):
+                    if self.remove(obj['timestamp']):
+                        arr.remove(obj)
+                    else: break
+            return synced
+        return False """

 class Main:
     output_scale_factor = 0.5
@@ -127,7 +358,11 @@ class Main:
     def __init__(self):
         global debug
         self.args = parse_args()
+        self.guess_board_config()
+        if not self.args.noGui:
+            self.settings_gui()
         debug = self.args.debug
+        self.output_scale_factor = self.args.outputScaleFactor
         self.aruco_dictionary = cv2.aruco.Dictionary_get(
             cv2.aruco.DICT_4X4_1000)
         self.focus_value = self.args.rgbLensPosition
@@ -140,6 +375,11 @@ def __init__(self):
                 'Board config not found: {}'.format(board_path))
             with open(board_path) as fp:
                 self.board_config = json.load(fp)
+                self.board_config = self.board_config['board_config']
+                # Keep an independent backup; plain assignment would alias the same dict
+                self.board_config_backup = copy.deepcopy(self.board_config)
+        else:
+            raise ValueError('Please specify the board config file using the --board argument.')
+
         # TODO: set the total images
         # random polygons for count
         self.total_images = self.args.count * \
@@ -147,11 +387,30 @@ def __init__(self):
         if debug:
             print("Using Arguments=", self.args)

-        if self.args.board.upper() == 'OAK-D-LITE':
-            raise Exception(
-                "OAK-D-Lite Calibration is not supported on main yet. Please use `lite_calibration` branch to calibrate your OAK-D-Lite!!")
+        # if self.args.board.upper() == 'OAK-D-LITE':
+        #     raise Exception(
+        #         "OAK-D-Lite Calibration is not supported on main yet. 
Please use `lite_calibration` branch to calibrate your OAK-D-Lite!!") + + self.device = dai.Device() + cameraProperties = self.device.getConnectedCameraFeatures() + for properties in cameraProperties: + for in_cam in self.board_config['cameras'].keys(): + cam_info = self.board_config['cameras'][in_cam] + if properties.socket == stringToCam[in_cam]: + self.board_config['cameras'][in_cam]['sensorName'] = properties.sensorName + print('Cam: {} and focus: {}'.format(cam_info['name'], properties.hasAutofocus)) + self.board_config['cameras'][in_cam]['hasAutofocus'] = properties.hasAutofocus + # self.auto_checkbox_dict[cam_info['name'] + '-Camera-connected'].check() + break + pipeline = self.create_pipeline() - self.device = dai.Device(pipeline) + self.device.startPipeline(pipeline) + + self.camera_queue = {} + for config_cam in self.board_config['cameras']: + cam = self.board_config['cameras'][config_cam] + self.camera_queue[cam['name']] = self.device.getOutputQueue(cam['name'], 1, False) + """ cameraProperties = self.device.getConnectedCameraProperties() for properties in cameraProperties: if properties.sensorName == 'OV7251': @@ -159,10 +418,108 @@ def __init__(self): "OAK-D-Lite Calibration is not supported on main yet. Please use `lite_calibration` branch to calibrate your OAK-D-Lite!!") self.device.startPipeline(pipeline)""" - self.left_camera_queue = self.device.getOutputQueue("left", 30, True) - self.right_camera_queue = self.device.getOutputQueue("right", 30, True) - if not self.args.disableRgb: - self.rgb_camera_queue = self.device.getOutputQueue("rgb", 30, True) + # self.left_camera_queue = self.device.getOutputQueue("left", 30, True) + # self.right_camera_queue = self.device.getOutputQueue("right", 30, True) + # if not self.args.disableRgb: + # self.rgb_camera_queue = self.device.getOutputQueue("rgb", 30, True) + + def guess_board_config(self): + if self.args.board: # if board is set with flags then use that + return + + product_name = None + try: + with dai.Device() as device: + try: + product_name = device.readFactoryCalibration().eepromToJson().get('productName') + except: pass + if not product_name: + try: + product_name = device.readCalibration().eepromToJson().get('productName') + except: pass + except: pass + + if not product_name: + print("Couldn't determine product name from EEPROM") + return + + product_name = product_name.upper().replace(' ', '-') + for board_type in BOARD_TYPES: + if board_type in product_name: + self.args.board = board_type + return + + print("Couldn't determine board type from product name: {}".format(product_name)) + + + def settings_gui(self): + from PyQt5 import QtCore, QtWidgets + app = QtWidgets.QApplication([]) + window = QtWidgets.QDialog() + window.setWindowFlags(window.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint) + window.setWindowTitle('DepthAI Calibration Settings') + # Define the layout + layout = QtWidgets.QGridLayout() + layout.columnCount = 2 + for i, (name, properties) in enumerate(SETTINGS.items()): + label = properties.get('label') + if label is None: + continue + if properties['type'] == bool: + widget = QtWidgets.QCheckBox() + widget.setChecked(getattr(self.args, name)) + widget.stateChanged.connect(lambda v, n=name: setattr(self.args, n, v != 0)) + elif properties['type'] == int: + widget = QtWidgets.QSpinBox() + widget.setMinimum(properties.get('min', 0)) + widget.setMaximum(properties.get('max', 100)) + widget.setValue(getattr(self.args, name)) + widget.valueChanged.connect(lambda v, n=name: setattr(self.args, n, v)) + 
elif properties['type'] == float:
+                widget = QtWidgets.QDoubleSpinBox()
+                widget.setMinimum(properties.get('min', 0))
+                widget.setMaximum(properties.get('max', 100))
+                widget.setSingleStep(properties.get('step', 0.1))
+                widget.setValue(getattr(self.args, name))
+                widget.valueChanged.connect(lambda v, n=name: setattr(self.args, n, v))
+            elif properties['type'] == str:
+                if 'options' in properties:
+                    widget = QtWidgets.QComboBox()
+                    widget.addItems(properties['options'])
+                    widget.setCurrentText(getattr(self.args, name))
+                    widget.currentTextChanged.connect(lambda v, n=name: setattr(self.args, n, v))
+                elif 'multichoice_options' in properties:
+                    # create a row of checkboxes
+                    widget = QtWidgets.QWidget()
+                    widget.setLayout(QtWidgets.QHBoxLayout())
+                    checkboxes = {}
+                    for choice in properties['multichoice_options']:
+                        checkbox = QtWidgets.QCheckBox(choice)
+                        checkbox.setChecked(choice in getattr(self.args, name))
+                        # store the list of currently checked choices on self.args
+                        checkbox.stateChanged.connect(lambda v, n=name: setattr(self.args, n, [c for c, box in checkboxes.items() if box.isChecked()]))
+                        checkboxes[choice] = checkbox
+                        widget.layout().addWidget(checkbox)
+                else:
+                    widget = QtWidgets.QLineEdit()
+                    widget.setText(getattr(self.args, name))
+                    widget.textChanged.connect(lambda v, n=name: setattr(self.args, n, v))
+            else:
+                continue
+
+            label_widget = QtWidgets.QLabel(label)
+            label_widget.setToolTip(properties.get('help'))
+            widget.setToolTip(properties.get('help'))
+            layout.addWidget(label_widget, i, 0)
+            layout.addWidget(widget, i, 1)
+
+        button = QtWidgets.QPushButton('Start Calibration')
+        button.clicked.connect(lambda: window.accept())
+        layout.addWidget(button, i + 1, 0, 1, 2)
+
+        window.reject = lambda: sys.exit(0)
+        window.setLayout(layout)
+        window.show()
+        app.exec()

     def is_markers_found(self, frame):
         marker_corners, _, _ = cv2.aruco.detectMarkers(
@@ -185,53 +542,50 @@ def test_camera_orientation(self, frame_l, frame_r):
             if left_corner[0][0] - right_corner[0][0] < 0:
                 return False
         return True
-
+
     def create_pipeline(self):
         pipeline = dai.Pipeline()

-        cam_left = pipeline.createMonoCamera()
-        cam_right = pipeline.createMonoCamera()
-
-        xout_left = pipeline.createXLinkOut()
-        xout_right = pipeline.createXLinkOut()
-
-        if self.args.swapLR:
-            cam_left.setBoardSocket(dai.CameraBoardSocket.RIGHT)
-            cam_right.setBoardSocket(dai.CameraBoardSocket.LEFT)
-        else:
-            cam_left.setBoardSocket(dai.CameraBoardSocket.LEFT)
-            cam_right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
-
-        cam_left.setResolution(
-            dai.MonoCameraProperties.SensorResolution.THE_800_P)
-        cam_left.setFps(self.args.fps)
-
-        cam_right.setResolution(
-            dai.MonoCameraProperties.SensorResolution.THE_800_P)
-        cam_right.setFps(self.args.fps)
-
-        xout_left.setStreamName("left")
-        cam_left.out.link(xout_left.input)
+        fps = self.args.fps  # honor the -fps argument instead of hardcoding 10
+        for cam_id in self.board_config['cameras']:
+            cam_info = self.board_config['cameras'][cam_id]
+            if cam_info['type'] == 'mono':
+                cam_node = pipeline.createMonoCamera()
+                xout = pipeline.createXLinkOut()

-        xout_right.setStreamName("right")
-        cam_right.out.link(xout_right.input)
+                cam_node.setBoardSocket(stringToCam[cam_id])
+                cam_node.setResolution(camToMonoRes[cam_info['sensorName']])
+                cam_node.setFps(fps)

-        if not self.args.disableRgb:
-            rgb_cam = pipeline.createColorCamera()
-            rgb_cam.setResolution(
-                dai.ColorCameraProperties.SensorResolution.THE_4_K)
-            rgb_cam.setInterleaved(False)
-            rgb_cam.setBoardSocket(dai.CameraBoardSocket.RGB)
-            rgb_cam.setIspScale(1, 3)
-            rgb_cam.initialControl.setManualFocus(self.focus_value)
-            rgb_cam.setFps(self.args.fps)
-
-            
xout_rgb_isp = pipeline.createXLinkOut() - xout_rgb_isp.setStreamName("rgb") - rgb_cam.isp.link(xout_rgb_isp.input) + xout.setStreamName(cam_info['name']) + cam_node.out.link(xout.input) + else: + cam_node = pipeline.createColorCamera() + xout = pipeline.createXLinkOut() + + cam_node.setBoardSocket(stringToCam[cam_id]) + cam_node.setResolution(camToRgbRes[cam_info['sensorName']]) + cam_node.setFps(fps) + + xout.setStreamName(cam_info['name']) + cam_node.isp.link(xout.input) + if cam_info['sensorName'] == "OV9*82": + cam_node.initialControl.setSharpness(0) + cam_node.initialControl.setLumaDenoise(0) + cam_node.initialControl.setChromaDenoise(4) + + if cam_info['hasAutofocus']: + cam_node.initialControl.setManualFocus(self.focus_value) + + controlIn = pipeline.createXLinkIn() + controlIn.setStreamName(cam_info['name'] + '-control') + controlIn.out.link(cam_node.inputControl) + xout.input.setBlocking(False) + xout.input.setQueueSize(1) return pipeline + def parse_frame(self, frame, stream_name): if not self.is_markers_found(frame): return False @@ -274,7 +628,7 @@ def show(position, text): def show_failed_capture_frame(self): width, height = int( self.width * self.output_scale_factor), int(self.height * self.output_scale_factor) - info_frame = np.zeros((height, width, 3), np.uint8) + info_frame = np.zeros((self.height, self.width, 3), np.uint8) print("py: Capture failed, unable to find chessboard! Fix position and press spacebar again") def show(position, text): @@ -319,6 +673,189 @@ def empty_calibration(self, calib: dai.CalibrationHandler): for attr in ["boardName", "boardRev"]: if getattr(data, attr): return False return True + + def capture_images_sync(self): + finished = False + capturing = False + start_timer = False + timer = self.args.captureDelay + prev_time = None + curr_time = None + + self.display_name = "Image Window" + syncCollector = HostSync(60) + while not finished: + currImageList = {} + for key in self.camera_queue.keys(): + frameMsg = self.camera_queue[key].get() + + # print(f'Timestamp of {key} is {frameMsg.getTimestamp()}') + + syncCollector.add_msg(key, frameMsg, frameMsg.getTimestamp()) + gray_frame = None + if frameMsg.getType() == dai.RawImgFrame.Type.RAW8: + gray_frame = frameMsg.getCvFrame() + else: + gray_frame = cv2.cvtColor(frameMsg.getCvFrame(), cv2.COLOR_BGR2GRAY) + currImageList[key] = gray_frame + # print(gray_frame.shape) + + resizeHeight = 0 + resizeWidth = 0 + for name, imgFrame in currImageList.items(): + + # print(f'original Shape of {name} is {imgFrame.shape}' ) + currImageList[name] = cv2.resize( + imgFrame, (0, 0), fx=self.output_scale_factor, fy=self.output_scale_factor) + + height, width = currImageList[name].shape + + widthRatio = resizeWidth / width + heightRatio = resizeHeight / height + + + # if widthRatio > 1.0 and heightRatio > 1.0 and widthRatio < 1.2 and heightRatio < 1.2: + # continue + + + if (widthRatio > 0.8 and heightRatio > 0.8 and widthRatio <= 1.0 and heightRatio <= 1.0) or (widthRatio > 1.2 and heightRatio > 1.2) or (resizeHeight == 0): + resizeWidth = width + resizeHeight = height + # elif widthRatio > 1.2 and heightRatio > 1.2: + + + # if width < resizeWidth: + # resizeWidth = width + # if height > resizeHeight: + # resizeHeight = height + + # print(f'Scale Shape is {resizeWidth}x{resizeHeight}' ) + + combinedImage = None + for name, imgFrame in currImageList.items(): + height, width = imgFrame.shape + if width > resizeWidth and height > resizeHeight: + imgFrame = cv2.resize( + imgFrame, (0, 0), fx= resizeWidth / width, fy= 
resizeWidth / width) + + # print(f'final_scaledImageSize is {imgFrame.shape}') + if self.polygons is None: + self.height, self.width = imgFrame.shape + print(self.height, self.width) + self.polygons = calibUtils.setPolygonCoordinates( + self.height, self.width) + + localPolygon = np.array([self.polygons[self.current_polygon]]) + print(localPolygon.shape) + print(localPolygon) + if self.images_captured_polygon == 1: + # perspectiveRotationMatrix = Rotation.from_euler('z', 45, degrees=True).as_matrix() + angle = 30. + theta = (angle/180.) * np.pi + perspectiveRotationMatrix = np.array([[np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)]]) + + localPolygon = np.matmul(localPolygon, perspectiveRotationMatrix).astype(np.int32) + localPolygon[0][:, 1] += abs(localPolygon.min()) + if self.images_captured_polygon == 2: + # perspectiveRotationMatrix = Rotation.from_euler('z', -45, degrees=True).as_matrix() + angle = -30. + theta = (angle/180.) * np.pi + perspectiveRotationMatrix = np.array([[np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)]]) + localPolygon = np.matmul(localPolygon, perspectiveRotationMatrix).astype(np.int32) + localPolygon[0][:, 1] += (height - abs(localPolygon[0][:, 1].max())) + localPolygon[0][:, 0] += abs(localPolygon[0][:, 1].min()) + + print(localPolygon) + print(localPolygon.shape) + cv2.polylines( + imgFrame, localPolygon, + True, (0, 0, 255), 4) + + # TODO(Sachin): Add this back with proper alignment + # cv2.putText( + # imgFrame, + # "Polygon Position: {}. Captured {} of {} {} images".format( + # self.current_polygon + 1, self.images_captured, self.total_images, name), + # (0, 700), cv2.FONT_HERSHEY_TRIPLEX, 1.0, (255, 0, 0)) + + height, width = imgFrame.shape + height_offset = (resizeHeight - height)//2 + width_offset = (resizeWidth - width)//2 + subImage = np.pad(imgFrame, ((height_offset, height_offset), (width_offset,width_offset)), 'constant', constant_values=0) + if combinedImage is None: + combinedImage = subImage + else: + combinedImage = np.hstack((combinedImage, subImage)) + + key = cv2.waitKey(1) + if key == 27 or key == ord("q"): + print("py: Calibration has been interrupted!") + raise SystemExit(0) + elif key == ord(" "): + start_timer = True + prev_time = time.time() + timer = self.args.captureDelay + + if start_timer == True: + curr_time = time.time() + if curr_time - prev_time >= 1: + prev_time = curr_time + timer = timer - 1 + if timer <= 0 and start_timer == True: + start_timer = False + capturing = True + print('Start capturing...') + + image_shape = combinedImage.shape + cv2.putText(combinedImage, str(timer), + (image_shape[1]//2, image_shape[0]//2), font, + 7, (0, 255, 255), + 4, cv2.LINE_AA) + + cv2.imshow(self.display_name, combinedImage) + tried = {} + allPassed = True + + if capturing: + syncedMsgs = syncCollector.get_synced() + if syncedMsgs == False: + for key in self.camera_queue.keys(): + self.camera_queue[key].getAll() + continue + for name, frameMsg in syncedMsgs.items(): + tried[name] = self.parse_frame(frameMsg.getCvFrame(), name) + allPassed = allPassed and tried[name] + + if allPassed: + if not self.images_captured: + leftStereo = self.board_config['cameras'][self.board_config['stereo_config']['left_cam']]['name'] + rightStereo = self.board_config['cameras'][self.board_config['stereo_config']['right_cam']]['name'] + print(f'Left Camera of stereo is {leftStereo} and right Camera of stereo is {rightStereo}') + # if not self.test_camera_orientation(syncedMsgs[leftStereo].getCvFrame(), 
syncedMsgs[rightStereo].getCvFrame()):
+                    #     self.show_failed_orientation()
+
+                    self.images_captured += 1
+                    self.images_captured_polygon += 1
+                    capturing = False
+                else:
+                    self.show_failed_capture_frame()
+                    capturing = False
+
+            # print(f'self.images_captured_polygon {self.images_captured_polygon}')
+            # print(f'self.current_polygon {self.current_polygon}')
+            # print(f'len(self.polygons) {len(self.polygons)}')
+
+            if self.images_captured_polygon == self.args.count:
+                self.images_captured_polygon = 0
+                self.current_polygon += 1
+
+                if self.current_polygon == len(self.polygons):
+                    finished = True
+                    cv2.destroyAllWindows()
+                    break
+
     def capture_images(self):
         finished = False
@@ -522,129 +1059,171 @@ def capture_images(self):
     def calibrate(self):
         print("Starting image processing")
-        cal_data = calibUtils.StereoCalibration()
+        stereo_calib = calibUtils.StereoCalibration()
         dest_path = str(Path('resources').absolute())
-        self.args.cameraMode = 'perspective' # hardcoded for now
+        # self.args.cameraMode = 'perspective' # hardcoded for now
         try:
-            epiploar_error, epiploar_error_rRgb, calibData = cal_data.calibrate(self.dataset_path, self.args.squareSizeCm,
-                                                                                self.args.markerSizeCm, self.args.squaresX, self.args.squaresY, self.args.cameraMode, not self.args.disableRgb, self.args.rectifiedDisp)
-            if epiploar_error > self.args.maxEpiploarError:
-                image = create_blank(900, 512, rgb_color=red)
-                text = "High L-r epiploar_error: " + str(epiploar_error)
-                cv2.putText(image, text, (10, 250), font, 2, (0, 0, 0), 2)
-                text = "Requires Recalibration "
-                cv2.putText(image, text, (10, 300), font, 2, (0, 0, 0), 2)
-
-                cv2.imshow("Result Image", image)
-                cv2.waitKey(0)
-                print("Requires Recalibration.....!!")
-                raise SystemExit(1)
-            elif epiploar_error_rRgb is not None and epiploar_error_rRgb > self.args.maxEpiploarError:
-                image = create_blank(900, 512, rgb_color=red)
-                text = "High RGB-R epiploar_error: " + str(epiploar_error_rRgb)
-                cv2.putText(image, text, (10, 250), font, 2, (0, 0, 0), 2)
-                text = "Requires Recalibration "
-                cv2.putText(image, text, (10, 300), font, 2, (0, 0, 0), 2)
-
-                cv2.imshow("Result Image", image)
-                cv2.waitKey(0)
-                print("Requires Recalibration.....!!")
-                raise SystemExit(1)
-            left = dai.CameraBoardSocket.LEFT
-            right = dai.CameraBoardSocket.RIGHT
-            if self.args.swapLR:
-                left = dai.CameraBoardSocket.RIGHT
-                right = dai.CameraBoardSocket.LEFT
+            status, result_config = stereo_calib.calibrate(
+                self.board_config,
+                self.dataset_path,
+                self.args.squareSizeCm,
+                self.args.markerSizeCm,
+                self.args.squaresX,
+                self.args.squaresY,
+                self.args.cameraMode,
+                self.args.rectifiedDisp) # Turn off enable disp rectify
             calibration_handler = self.device.readCalibration()
+            try:
+                if self.empty_calibration(calibration_handler):
+                    calibration_handler.setBoardInfo(self.board_config['name'], self.board_config['revision'])
+            except Exception as e:
+                print('Device closed in exception...'
) + self.device.close() + print(e) + print(traceback.format_exc()) + raise SystemExit(1) - # calibration_handler.setBoardInfo(self.board_config['board_config']['name'], self.board_config['board_config']['revision']) - # Set board name / revision only if calibration is empty - if self.empty_calibration(calibration_handler): - calibration_handler.setBoardInfo(self.board_config['board_config']['name'], self.board_config['board_config']['revision']) - - calibration_handler.setCameraIntrinsics(left, calibData[2], 1280, 800) - calibration_handler.setCameraIntrinsics(right, calibData[3], 1280, 800) - measuredTranslation = [ - - self.board_config['board_config']['left_to_right_distance_cm'], 0.0, 0.0] - calibration_handler.setCameraExtrinsics( - left, right, calibData[5], calibData[6], measuredTranslation) - - calibration_handler.setDistortionCoefficients(left, calibData[9] ) - calibration_handler.setDistortionCoefficients(right, calibData[10]) - - calibration_handler.setFov(left, self.board_config['board_config']['left_fov_deg']) - calibration_handler.setFov(right, self.board_config['board_config']['left_fov_deg']) - - calibration_handler.setStereoLeft( - left, calibData[0]) - calibration_handler.setStereoRight( - right, calibData[1]) - - if not self.args.disableRgb: - calibration_handler.setCameraIntrinsics(dai.CameraBoardSocket.RGB, calibData[4], 1920, 1080) - calibration_handler.setDistortionCoefficients(dai.CameraBoardSocket.RGB, calibData[11]) - calibration_handler.setFov(dai.CameraBoardSocket.RGB, self.board_config['board_config']['rgb_fov_deg']) - calibration_handler.setLensPosition(dai.CameraBoardSocket.RGB, self.focus_value) - - measuredTranslation = [ - self.board_config['board_config']['left_to_right_distance_cm'] - self.board_config['board_config']['left_to_rgb_distance_cm'], 0.0, 0.0] - calibration_handler.setCameraExtrinsics( - right, dai.CameraBoardSocket.RGB, calibData[7], calibData[8], measuredTranslation) - - resImage = None - if not self.device.isClosed(): + # calibration_handler.set + error_text = [] + + for camera in result_config['cameras'].keys(): + cam_info = result_config['cameras'][camera] + # log_list.append(self.ccm_selected[cam_info['name']]) + + color = green + reprojection_error_threshold = 1.0 + if cam_info['size'][1] > 720: + print(cam_info['size'][1]) + reprojection_error_threshold = reprojection_error_threshold * cam_info['size'][1] / 720 + + if cam_info['name'] == 'rgb': + reprojection_error_threshold = 3 + print('Reprojection error threshold -> {}'.format(reprojection_error_threshold)) + + if cam_info['reprojection_error'] > reprojection_error_threshold: + color = red + error_text.append("high Reprojection Error") + text = cam_info['name'] + ' Reprojection Error: ' + format(cam_info['reprojection_error'], '.6f') + print(text) + # pygame_render_text(self.screen, text, (vis_x, vis_y), color, 30) + + calibration_handler.setDistortionCoefficients(stringToCam[camera], cam_info['dist_coeff']) + calibration_handler.setCameraIntrinsics(stringToCam[camera], cam_info['intrinsics'], cam_info['size'][0], cam_info['size'][1]) + calibration_handler.setFov(stringToCam[camera], cam_info['hfov']) + + if cam_info['hasAutofocus']: + calibration_handler.setLensPosition(stringToCam[camera], self.focus_value) + + # log_list.append(self.focusSigma[cam_info['name']]) + # log_list.append(cam_info['reprojection_error']) + # color = green/// + # epErrorZText + if 'extrinsics' in cam_info: + + if 'to_cam' in cam_info['extrinsics']: + right_cam = 
result_config['cameras'][cam_info['extrinsics']['to_cam']]['name'] + left_cam = cam_info['name'] + + epipolar_threshold = 0.6 + + if cam_info['extrinsics']['epipolar_error'] > epipolar_threshold: + color = red + error_text.append("high epipolar error between " + left_cam + " and " + right_cam) + elif cam_info['extrinsics']['epipolar_error'] == -1: + color = red + error_text.append("Epiploar validation failed between " + left_cam + " and " + right_cam) + + # log_list.append(cam_info['extrinsics']['epipolar_error']) + # text = left_cam + "-" + right_cam + ' Avg Epipolar error: ' + format(cam_info['extrinsics']['epipolar_error'], '.6f') + # pygame_render_text(self.screen, text, (vis_x, vis_y), color, 30) + # vis_y += 30 + specTranslation = np.array([cam_info['extrinsics']['specTranslation']['x'], cam_info['extrinsics']['specTranslation']['y'], cam_info['extrinsics']['specTranslation']['z']], dtype=np.float32) + + calibration_handler.setCameraExtrinsics(stringToCam[camera], stringToCam[cam_info['extrinsics']['to_cam']], cam_info['extrinsics']['rotation_matrix'], cam_info['extrinsics']['translation'], specTranslation) + if result_config['stereo_config']['left_cam'] == camera and result_config['stereo_config']['right_cam'] == cam_info['extrinsics']['to_cam']: + calibration_handler.setStereoLeft(stringToCam[camera], result_config['stereo_config']['rectification_left']) + calibration_handler.setStereoRight(stringToCam[cam_info['extrinsics']['to_cam']], result_config['stereo_config']['rectification_right']) + elif result_config['stereo_config']['left_cam'] == cam_info['extrinsics']['to_cam'] and result_config['stereo_config']['right_cam'] == camera: + calibration_handler.setStereoRight(stringToCam[camera], result_config['stereo_config']['rectification_right']) + calibration_handler.setStereoLeft(stringToCam[cam_info['extrinsics']['to_cam']], result_config['stereo_config']['rectification_left']) + + if len(error_text) == 0: + print('Flashing Calibration data into ') + # print(calib_dest_path) + + eeepromData = calibration_handler.getEepromData() + print(f'EEPROM VERSION being flashed is -> {eeepromData.version}') + eeepromData.version = 7 + print(f'EEPROM VERSION being flashed is -> {eeepromData.version}') mx_serial_id = self.device.getDeviceInfo().getMxId() calib_dest_path = dest_path + '/' + mx_serial_id + '.json' calibration_handler.eepromToJsonFile(calib_dest_path) - is_write_succesful = False - - try: - if self.args.factoryCalibration: + # try: + self.device.flashCalibration2(calibration_handler) + is_write_succesful = True + # except RuntimeError as e: + # is_write_succesful = False + # print(e) + # print("Writing in except...") + # is_write_succesful = self.device.flashCalibration2(calibration_handler) + + if self.args.factoryCalibration: + try: self.device.flashFactoryCalibration(calibration_handler) - is_write_succesful = self.device.flashCalibration( - calibration_handler) - except: - print("Writing in except...") - - if self.args.factoryCalibration: - self.device.flashFactoryCalibration(calibration_handler) - is_write_succesful = self.device.flashCalibration( - calibration_handler) + is_write_factory_sucessful = True + except RuntimeError: + print("flashFactoryCalibration Failed...") + is_write_factory_sucessful = False if is_write_succesful: + + + """ eepromUnionData = {} + calibHandler = self.device.readCalibration2() + eepromUnionData['calibrationUser'] = calibHandler.eepromToJson() + + calibHandler = self.device.readFactoryCalibration() + eepromUnionData['calibrationFactory'] = 
calibHandler.eepromToJson()
+
+                eepromUnionData['calibrationUserRaw'] = self.device.readCalibrationRaw()
+                eepromUnionData['calibrationFactoryRaw'] = self.device.readFactoryCalibrationRaw()
+                with open(calib_dest_path, "w") as outfile:
+                    json.dump(eepromUnionData, outfile, indent=4) """
+
+                self.device.close()
+                text = "EEPROM written successfully"
                 resImage = create_blank(900, 512, rgb_color=green)
-                text = "Calibration Succesful with"
-                cv2.putText(resImage, text, (10, 250),
-                            font, 2, (0, 0, 0), 2)
-                text = "Epipolar error of " + str(epiploar_error)
-                cv2.putText(resImage, text, (10, 300),
-                            font, 2, (0, 0, 0), 2)
+                cv2.putText(resImage, text, (10, 250), font, 2, (0, 0, 0), 2)
+                cv2.imshow("Result Image", resImage)
+                cv2.waitKey(0)
+
             else:
+                self.device.close()
+                text = "EEPROM write failed!!"
                 resImage = create_blank(900, 512, rgb_color=red)
-                text = "EEprom Write Failed!! " + str(epiploar_error)
-                cv2.putText(resImage, text, (10, 250),
-                            font, 2, (0, 0, 0), 2)
-                text = "Try recalibrating !!"
-                cv2.putText(resImage, text, (10, 300),
-                            font, 2, (0, 0, 0), 2)
-
-
+                cv2.putText(resImage, text, (10, 250), font, 2, (0, 0, 0), 2)
+                cv2.imshow("Result Image", resImage)
+                cv2.waitKey(0)
+                # return (False, "EEPROM write Failed!!")
+
-            else:
-                # calib_dest_path = dest_path + '/depthai_calib.json'
-                # calibration_handler.eepromToJsonFile(calib_dest_path)
-                resImage = create_blank(900, 512, rgb_color=red)
-                text = "Calibratin succesful. " + str(epiploar_error)
-                cv2.putText(resImage, text, (10, 250), font, 2, (0, 0, 0), 2)
-                # text = "Device not found to write to EEPROM"
-                # cv2.putText(resImage, text, (10, 300), font, 2, (0, 0, 0), 2)
-
-            if resImage is not None:
-                cv2.imshow("Result Image", resImage)
-                cv2.waitKey(0)
-        except AssertionError as e:
-            print("[ERROR] " + str(e))
+        else:
+            self.device.close()
+            print(error_text)
+            for text in error_text:
+                # text = error_text[0]
+                resImage = create_blank(900, 512, rgb_color=red)
+                cv2.putText(resImage, text, (10, 250), font, 2, (0, 0, 0), 2)
+                cv2.imshow("Result Image", resImage)
+                cv2.waitKey(0)
+        except Exception as e:
+            self.device.close()
+            print('Device closed in exception...')
+            print(e)
+            print(traceback.format_exc())
             raise SystemExit(1)
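+    # Editor's note on the flash flow above: the computed calibration is first
+    # dumped to resources/<MXID>.json via eepromToJsonFile(), then written to
+    # the user EEPROM area with flashCalibration2() (which raises RuntimeError
+    # on failure); the protected factory area is attempted only when
+    # -fac / --factoryCalibration is passed.
+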

     def run(self):
@@ -652,16 +1231,16 @@ def run(self):
         try:
             if Path('dataset').exists():
                 shutil.rmtree('dataset/')
-            Path("dataset/left").mkdir(parents=True, exist_ok=True)
-            Path("dataset/right").mkdir(parents=True, exist_ok=True)
-            if not self.args.disableRgb:
-                Path("dataset/rgb").mkdir(parents=True, exist_ok=True)
+            for cam_id in self.board_config['cameras']:
+                name = self.board_config['cameras'][cam_id]['name']
+                Path("dataset/{}".format(name)).mkdir(parents=True, exist_ok=True)
+
         except OSError:
             traceback.print_exc()
             print("An error occurred trying to create image dataset directories!")
             raise SystemExit(1)
         self.show_info_frame()
-        self.capture_images()
+        self.capture_images_sync()
         self.dataset_path = str(Path("dataset").absolute())
         if 'process' in self.args.mode:
             self.calibrate()
diff --git a/depthai_helpers/calibration_utils.py b/depthai_helpers/calibration_utils.py
index ff9a77617..944a0d267 100644
--- a/depthai_helpers/calibration_utils.py
+++ b/depthai_helpers/calibration_utils.py
@@ -5,12 +5,16 @@
 import os
 import shutil
 import numpy as np
-import re
+from scipy.spatial.transform import Rotation
 import time
 import json
 import cv2.aruco as aruco
 from pathlib import Path
+from collections import deque

 # Creates a set of 13 polygon coordinates
+traceLevel = 2
+# trace = 3 -> Undistorted image viz
+# trace = 2 -> Print logs

 def setPolygonCoordinates(height, width):
@@ -21,24 +25,36 @@ def setPolygonCoordinates(height, width):
     slope = 150

     p_coordinates = [
-        [[margin,margin], [margin, height-margin], [width-margin, height-margin], [width-margin, margin]],
-
-        [[margin,0], [margin,height], [width//2, height-slope], [width//2, slope]],
-        [[horizontal_shift, 0], [horizontal_shift, height], [width//2 + horizontal_shift, height-slope], [width//2 + horizontal_shift, slope]],
-        [[horizontal_shift*2-margin, 0], [horizontal_shift*2-margin, height], [width//2 + horizontal_shift*2-margin, height-slope], [width//2 + horizontal_shift*2-margin, slope]],
-
-        [[width-margin, 0], [width-margin, height], [width//2, height-slope], [width//2, slope]],
-        [[width-horizontal_shift, 0], [width-horizontal_shift, height], [width//2-horizontal_shift, height-slope], [width//2-horizontal_shift, slope]],
-        [[width-horizontal_shift*2+margin, 0], [width-horizontal_shift*2+margin, height], [width//2-horizontal_shift*2+margin, height-slope], [width//2-horizontal_shift*2+margin, slope]],
-
-        [[0,margin], [width, margin], [width-slope, height//2], [slope, height//2]],
-        [[0,vertical_shift], [width, vertical_shift], [width-slope, height//2+vertical_shift], [slope, height//2+vertical_shift]],
-        [[0,vertical_shift*2-margin], [width, vertical_shift*2-margin], [width-slope, height//2+vertical_shift*2-margin], [slope, height//2+vertical_shift*2-margin]],
-
-        [[0,height-margin], [width, height-margin], [width-slope, height//2], [slope, height//2]],
-        [[0,height-vertical_shift], [width, height-vertical_shift], [width-slope, height//2-vertical_shift], [slope, height//2-vertical_shift]],
-        [[0,height-vertical_shift*2+margin], [width, height-vertical_shift*2+margin], [width-slope, height//2-vertical_shift*2+margin], [slope, height//2-vertical_shift*2+margin]]
-        ]
+        [[margin, margin], [margin, height-margin], [width-margin, height-margin], [width-margin, margin]],
+
+        [[margin, 0], [margin, height], [width//2, height-slope], [width//2, slope]],
+        [[horizontal_shift, 0], [horizontal_shift, height], [width//2 + horizontal_shift, height-slope], [width//2 + horizontal_shift, slope]],
+        [[horizontal_shift*2-margin, 0], [horizontal_shift*2-margin, height], [width//2 + horizontal_shift*2-margin, height-slope], [width//2 + horizontal_shift*2-margin, slope]],
+
+        [[width-margin, 0], [width-margin, height], [width//2, height-slope], [width//2, slope]],
+        [[width-horizontal_shift, 0], [width-horizontal_shift, height], [width//2-horizontal_shift, height-slope], [width//2-horizontal_shift, slope]],
+        [[width-horizontal_shift*2+margin, 0], [width-horizontal_shift*2+margin, height], [width//2-horizontal_shift*2+margin, height-slope], [width//2-horizontal_shift*2+margin, slope]],
+
+        [[0, margin], [width, margin], [width-slope, height//2], [slope, height//2]],
+        [[0, vertical_shift], [width, vertical_shift], [width-slope, height//2+vertical_shift], [slope, height//2+vertical_shift]],
+        [[0, vertical_shift*2-margin], [width, vertical_shift*2-margin], [width-slope, height//2+vertical_shift*2-margin], [slope, height//2+vertical_shift*2-margin]],
+
+        [[0, height-margin], [width, height-margin], [width-slope, height//2], [slope, height//2]],
+        [[0, height-vertical_shift], [width, height-vertical_shift], [width-slope, height//2-vertical_shift], [slope, height//2-vertical_shift]],
+        [[0, height-vertical_shift*2+margin], [width, height-vertical_shift*2+margin], [width-slope, height//2-vertical_shift*2+margin], [slope, height//2-vertical_shift*2+margin]]
+    ]
     return p_coordinates
@@ -71,120 +87,113 @@ def polygon_from_image_name(image_name):
 class StereoCalibration(object):
+    global traceLevel
     """Class to Calculate Calibration and Rectify a Stereo Camera."""

     def __init__(self):
         """Class to Calculate Calibration and Rectify a Stereo Camera."""

-    def calibrate(self, filepath, square_size, mrk_size, squaresX, squaresY, camera_model, calibrate_rgb, enable_disp_rectify):
+    def calibrate(self, board_config, filepath, square_size, mrk_size, squaresX, squaresY, camera_model, enable_disp_rectify):
         """Function to calculate calibration for stereo camera."""
         start_time = time.time()
         # init object data
-        self.calibrate_rgb = calibrate_rgb
+        print(f'squaresX is {squaresX}')
         self.enable_rectification_disp = enable_disp_rectify
-        self.cameraModel = camera_model
+        self.cameraModel = camera_model
         self.data_path = filepath
         self.aruco_dictionary = aruco.Dictionary_get(aruco.DICT_4X4_1000)
+
         self.board = aruco.CharucoBoard_create(
-            # 22, 16,
-            squaresX, squaresY,
-            square_size,
-            mrk_size,
-            self.aruco_dictionary)
+            # 22, 16,
+            squaresX, squaresY,
+            square_size,
+            mrk_size,
+            self.aruco_dictionary)

-        # parameters = aruco.DetectorParameters_create()
+        # parameters = aruco.DetectorParameters_create()
         assert mrk_size != None, "ERROR: marker size not set"
-        self.calibrate_charuco3D(filepath)
-
-        # self.stereo_calibrate_two_homography_calib()
         print('~~~~~ Starting Stereo Calibration ~~~~~')
-        # self.stereo_calib_two_homo()
-
-        # rgb-right extrinsic calibration
-        if self.calibrate_rgb:
-            # if True:
-            # things to do. 
- # First: change the center and other thigns of the calibration matrix of rgb camera - self.rgb_calibrate(filepath) - else: - self.M3 = np.zeros((3, 3), dtype=np.float32) - self.R_rgb = np.zeros((3, 3), dtype=np.float32) - self.T_rgb = np.zeros(3, dtype=np.float32) - self.d3 = np.zeros(14, dtype=np.float32) - - # self.M3_scaled_write = np.copy(self.M3_scaled) - # self.M3_scaled_write[1, 2] += 40 - - R1_fp32 = self.R1.astype(np.float32) - R2_fp32 = self.R2.astype(np.float32) - M1_fp32 = self.M1.astype(np.float32) - M2_fp32 = self.M2.astype(np.float32) - M3_fp32 = self.M3.astype(np.float32) - - R_fp32 = self.R.astype(np.float32) # L-R rotation - T_fp32 = self.T.astype(np.float32) # L-R translation - R_rgb_fp32 = self.R_rgb.astype(np.float32) - T_rgb_fp32 = self.T_rgb.astype(np.float32) - - d1_coeff_fp32 = self.d1.astype(np.float32) - d2_coeff_fp32 = self.d2.astype(np.float32) - d3_coeff_fp32 = self.d3.astype(np.float32) - - if self.calibrate_rgb: - R_rgb_fp32 = np.linalg.inv(R_rgb_fp32) - T_rgb_fp32[0] = -T_rgb_fp32[0] - T_rgb_fp32[1] = -T_rgb_fp32[1] - T_rgb_fp32[2] = -T_rgb_fp32[2] - - self.calib_data = [R1_fp32, R2_fp32, M1_fp32, M2_fp32, M3_fp32, R_fp32, T_fp32, R_rgb_fp32, T_rgb_fp32, d1_coeff_fp32, d2_coeff_fp32, d3_coeff_fp32] + + for camera in board_config['cameras'].keys(): + cam_info = board_config['cameras'][camera] + print( + '<------------Calibrating {} ------------>'.format(cam_info['name'])) + images_path = filepath + '/' + cam_info['name'] + ret, intrinsics, dist_coeff, _, _, size = self.calibrate_intrinsics( + images_path, cam_info['hfov']) + cam_info['intrinsics'] = intrinsics + cam_info['dist_coeff'] = dist_coeff + cam_info['size'] = size # (Width, height) + cam_info['reprojection_error'] = ret + print( + '<------------Camera Name: {} ------------>'.format(cam_info['name'])) + print("Reprojection error of {0}: {1}".format( + cam_info['name'], ret)) + print("Estimated intrinsics of {0}: \n {1}".format( + cam_info['name'], intrinsics)) - if 1: # Print matrices, to compare with device data - np.set_printoptions(suppress=True, precision=6) - print("\nR1 (left)"); print(R1_fp32) - print("\nR2 (right)"); print(R2_fp32) - print("\nM1 (left)"); print(M1_fp32) - print("\nM2 (right)"); print(M2_fp32) - print("\nR"); print(R_fp32) - print("\nT"); print(T_fp32) - print("\nM3 (rgb)"); print(M3_fp32) - print("\R (rgb)") - print(R_rgb_fp32) - print("\nT (rgb)") - print(T_rgb_fp32) - - if 0: # Print computed homography, to compare with device data - np.set_printoptions(suppress=True, precision=6) - for res_height in [800, 720, 400]: - m1 = np.copy(M1_fp32) - m2 = np.copy(M2_fp32) - if res_height == 720: - m1[1, 2] -= 40 - m2[1, 2] -= 40 - if res_height == 400: - m_scale = [[0.5, 0, 0], - [0, 0.5, 0], - [0, 0, 1]] - m1 = np.matmul(m_scale, m1) - m2 = np.matmul(m_scale, m2) - h1 = np.matmul(np.matmul(m2, R1_fp32), np.linalg.inv(m1)) - h2 = np.matmul(np.matmul(m2, R2_fp32), np.linalg.inv(m2)) - h1 = np.linalg.inv(h1) - h2 = np.linalg.inv(h2) - print('\nHomography H1, H2 for height =', res_height) - print(h1) - print() - print(h2) - - print("\tTook %i seconds to run image processing." 
% - (round(time.time() - start_time, 2))) - - self.create_save_mesh() - - if self.calibrate_rgb: - return self.test_epipolar_charuco_lr(filepath), self.test_epipolar_charuco_rgbr(filepath), self.calib_data - else: - return self.test_epipolar_charuco_lr(filepath), None, self.calib_data + for camera in board_config['cameras'].keys(): + left_cam_info = board_config['cameras'][camera] + if 'extrinsics' in left_cam_info: + if 'to_cam' in left_cam_info['extrinsics']: + left_cam = camera + right_cam = left_cam_info['extrinsics']['to_cam'] + left_path = filepath + '/' + left_cam_info['name'] + + right_cam_info = board_config['cameras'][left_cam_info['extrinsics']['to_cam']] + right_path = filepath + '/' + right_cam_info['name'] + print('<-------------Extrinsics calibration of {} and {} ------------>'.format( + left_cam_info['name'], right_cam_info['name'])) + + specTranslation = left_cam_info['extrinsics']['specTranslation'] + rot = left_cam_info['extrinsics']['rotation'] + + translation = np.array( + [specTranslation['x'], specTranslation['y'], specTranslation['z']], dtype=np.float32) + rotation = Rotation.from_euler( + 'xyz', [rot['r'], rot['p'], rot['y']], degrees=True).as_matrix().astype(np.float32) + + extrinsics = self.calibrate_extrinsics(left_path, right_path, left_cam_info['intrinsics'], left_cam_info[ + 'dist_coeff'], right_cam_info['intrinsics'], right_cam_info['dist_coeff'], translation, rotation) + if extrinsics[0] == -1: + return -1, extrinsics[1] + + if board_config['stereo_config']['left_cam'] == left_cam and board_config['stereo_config']['right_cam'] == right_cam: + board_config['stereo_config']['rectification_left'] = extrinsics[3] + board_config['stereo_config']['rectification_right'] = extrinsics[4] + board_config['stereo_config']['p_left'] = extrinsics[5] + board_config['stereo_config']['p_right'] = extrinsics[6] + elif board_config['stereo_config']['left_cam'] == right_cam and board_config['stereo_config']['right_cam'] == left_cam: + board_config['stereo_config']['rectification_left'] = extrinsics[4] + board_config['stereo_config']['rectification_right'] = extrinsics[3] + board_config['stereo_config']['p_left'] = extrinsics[6] + board_config['stereo_config']['p_right'] = extrinsics[5] + + """ for stereoObj in board_config['stereo_config']: + + if stereoObj['left_cam'] == left_cam and stereoObj['right_cam'] == right_cam and stereoObj['main'] == 1: + stereoObj['rectification_left'] = extrinsics[3] + stereoObj['rectification_right'] = extrinsics[4] """ + + print('<-------------Epipolar error of {} and {} ------------>'.format( + left_cam_info['name'], right_cam_info['name'])) + left_cam_info['extrinsics']['epipolar_error'] = self.test_epipolar_charuco( + left_path, + right_path, + left_cam_info['intrinsics'], + left_cam_info['dist_coeff'], + right_cam_info['intrinsics'], + right_cam_info['dist_coeff'], + extrinsics[2], # Translation between left and right Cameras + extrinsics[3], # Left Rectification rotation + extrinsics[4], # Right Rectification rotation + extrinsics[5], # Left Rectification Intrinsics + extrinsics[6]) # Right Rectification Intrinsics + + left_cam_info['extrinsics']['rotation_matrix'] = extrinsics[1] + left_cam_info['extrinsics']['translation'] = extrinsics[2] + left_cam_info['extrinsics']['stereo_error'] = extrinsics[0] + + return 1, board_config def analyze_charuco(self, images, scale_req=False, req_resolution=(800, 1280)): """ @@ -199,39 +208,30 @@ def analyze_charuco(self, images, scale_req=False, req_resolution=(800, 1280)): # decimator = 0 # SUB PIXEL 
CORNER DETECTION CRITERION criteria = (cv2.TERM_CRITERIA_EPS + - cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001) + cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.00001) count = 0 for im in images: - print("=> Processing image {0}".format(im)) + if traceLevel == 2: + print("=> Processing image {0}".format(im)) + img_pth = Path(im) frame = cv2.imread(im) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - # gray = cv2.flip(gray, 0) # TODO(Sachin) : remove this later - # width = scale[1] expected_height = gray.shape[0]*(req_resolution[1]/gray.shape[1]) - # print('expected height -------------------> ' + str(expected_height)) - # print('required height -------------------> ' + - # str(req_resolution)) if scale_req and not (gray.shape[0] == req_resolution[0] and gray.shape[1] == req_resolution[1]): - # print("scaling----------------------->") if int(expected_height) == req_resolution[0]: # resizing to have both stereo and rgb to have same # resolution to capture extrinsics of the rgb-right camera gray = cv2.resize(gray, req_resolution[::-1], interpolation=cv2.INTER_CUBIC) - # print('~~~~~~~~~~ Only resizing.... ~~~~~~~~~~~~~~~~') else: # resizing and cropping to have both stereo and rgb to have same resolution # to calculate extrinsics of the rgb-right camera scale_width = req_resolution[1]/gray.shape[1] dest_res = ( int(gray.shape[1] * scale_width), int(gray.shape[0] * scale_width)) - # print("destination resolution------>") - # print(dest_res) gray = cv2.resize( gray, dest_res, interpolation=cv2.INTER_CUBIC) - # print("scaled gray shape") - # print(gray.shape) if gray.shape[0] < req_resolution[0]: raise RuntimeError("resized height of rgb is smaller than required. {0} < {1}".format( gray.shape[0], req_resolution[0])) @@ -240,26 +240,15 @@ def analyze_charuco(self, images, scale_req=False, req_resolution=(800, 1280)): # gray = gray[: req_resolution[0], :] gray = gray[del_height: del_height + req_resolution[0], :] - # print("del height ??") - # print(del_height) - # print(gray.shape) count += 1 - # self.parse_frame(gray, 'rgb_resized', - # 'rgb_resized_'+str(count)) marker_corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers( gray, self.aruco_dictionary) marker_corners, ids, refusd, recoverd = cv2.aruco.refineDetectedMarkers(gray, self.board, marker_corners, ids, rejectedCorners=rejectedImgPoints) - print('{0} number of Markers corners detected in the above image'.format( - len(marker_corners))) + if traceLevel == 1: + print('{0} marker corners detected in the image {1}'.format( + len(marker_corners), img_pth.name)) if len(marker_corners) > 0: - # print(len(marker_corners)) - # SUB PIXEL DETECTION - # for corner in marker_corners: - # cv2.cornerSubPix(gray, corner, - # winSize = (5,5), - # zeroZone = (-1,-1), - # criteria = criteria) res2 = cv2.aruco.interpolateCornersCharuco( marker_corners, ids, gray, self.board) @@ -270,142 +259,136 @@ def analyze_charuco(self, images, scale_req=False, req_resolution=(800, 1280)): winSize=(5, 5), zeroZone=(-1, -1), criteria=criteria) - allCorners.append(res2[1]) # Charco chess corners - allIds.append(res2[2]) # charuco chess corner id's + allCorners.append(res2[1]) # Charuco chess corners + allIds.append(res2[2]) # charuco chess corner ids all_marker_corners.append(marker_corners) all_marker_ids.append(ids) all_recovered.append(recoverd) else: - print("in else") + err_name = f'Failed to detect markers in the image {im}' + raise RuntimeError(err_name) else: print(im + " Not found") - # decimator+=1 + raise RuntimeError("Failed to detect markers in the image") + 
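For reference, the per-image detection sequence that analyze_charuco applies (cv2.aruco.detectMarkers, refineDetectedMarkers, interpolateCornersCharuco, then cornerSubPix) can be exercised standalone. A minimal sketch, not part of the patch: it assumes the legacy pre-4.7 cv2.aruco module API this file targets, a DICT_4X4_1000 dictionary and the default 11x8 board geometry from the settings above (the actual self.aruco_dictionary and self.board are constructed elsewhere in the file), and a hypothetical image path.

    import cv2

    # Hypothetical board parameters: 11x8 squares, 2.5 cm squares, 1.875 cm markers.
    # Dictionary_get / CharucoBoard_create are the legacy (pre-OpenCV-4.7) names.
    aruco_dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_1000)
    board = cv2.aruco.CharucoBoard_create(11, 8, 2.5, 1.875, aruco_dictionary)

    gray = cv2.imread('dataset/left/example.png', cv2.IMREAD_GRAYSCALE)  # hypothetical path

    # Detect ArUco markers, then recover misdetections against the known board layout.
    marker_corners, ids, rejected = cv2.aruco.detectMarkers(gray, aruco_dictionary)
    marker_corners, ids, _, _ = cv2.aruco.refineDetectedMarkers(
        gray, board, marker_corners, ids, rejectedCorners=rejected)

    # Interpolate ChArUco chessboard corners from the detected markers.
    count, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco(
        marker_corners, ids, gray, board)

    if count > 3:
        # Sub-pixel refinement with the same termination criteria as above.
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.00001)
        cv2.cornerSubPix(gray, charuco_corners, winSize=(5, 5),
                         zeroZone=(-1, -1), criteria=criteria)

The > 3 guard mirrors the len(res2_l[1]) > 3 checks used throughout the patch: both sub-pixel refinement and the later stereo matching need more than a handful of interpolated corners to be meaningful.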
+ # imsize = gray.shape[::-1] + return allCorners, allIds, all_marker_corners, all_marker_ids, gray.shape[::-1], all_recovered + + def calibrate_intrinsics(self, image_files, hfov): + image_files = glob.glob(image_files + "/*") + image_files.sort() + assert len( + image_files) != 0, "ERROR: Images not read correctly, check directory" + + allCorners, allIds, _, _, imsize, _ = self.analyze_charuco(image_files) + if self.cameraModel == 'perspective': + ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors = self.calibrate_camera_charuco( + allCorners, allIds, imsize, hfov) + # (Height, width) + if traceLevel == 3: + self.fisheye_undistort_visualizaation( + image_files, camera_matrix, distortion_coefficients, imsize) + + return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, imsize + else: + print('Fisheye--------------------------------------------------') + ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors = self.calibrate_fisheye( + allCorners, allIds, imsize) + if traceLevel == 3: + self.fisheye_undistort_visualizaation( + image_files, camera_matrix, distortion_coefficients, imsize) + - imsize = gray.shape - return allCorners, allIds, all_marker_corners, all_marker_ids, imsize, all_recovered + # (Height, width) + return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, imsize - def calibrate_charuco3D(self, filepath): + def calibrate_extrinsics(self, images_left, images_right, M_l, d_l, M_r, d_r, guess_translation, guess_rotation): self.objpoints = [] # 3d point in real world space self.imgpoints_l = [] # 2d points in image plane. self.imgpoints_r = [] # 2d points in image plane. - # calcorners_l = [] # 2d points in image - # calcorners_r = [] # 2d points in image - # calids_l = [] # ids found in imag - # calids_r = [] # ids found in imag + images_left = glob.glob(images_left + "/*") + images_right = glob.glob(images_right + "/*") - images_left = glob.glob(filepath + "/left/*") - images_right = glob.glob(filepath + "/right/*") - # images_rgb = glob.glob(filepath + "/rgb/*") - # print("Images left path------------------->") - # print(images_left) images_left.sort() images_right.sort() - # images_rgb.sort() assert len( - images_left) != 0, "ERROR: Images not read correctly, check directory" + images_left) != 0, "ERROR: Images not found, check directory" assert len( - images_right) != 0, "ERROR: Images not read correctly, check directory" - # assert len( - # images_rgb) != 0, "ERROR: Images not read correctly, check directory" - - print("~~~~~~~~~~~ POSE ESTIMATION LEFT CAMERA ~~~~~~~~~~~~~") - allCorners_l, allIds_l, _, _, imsize, _ = self.analyze_charuco( - images_left) - allCorners_r, allIds_r, _, _, imsize, _ = self.analyze_charuco( - images_right) - self.img_shape = imsize[::-1] - - # self.img_shape_rgb = imsize_rgb[::-1] - if self.cameraModel == 'perspective': - ret_l, self.M1, self.d1, rvecs, tvecs = self.calibrate_camera_charuco( - allCorners_l, allIds_l, self.img_shape) - ret_r, self.M2, self.d2, rvecs, tvecs = self.calibrate_camera_charuco( - allCorners_r, allIds_r, self.img_shape) - else: - ret_l, self.M1, self.d1, rvecs, tvecs = self.calibrate_fisheye(allCorners_l, allIds_l, self.img_shape) - ret_r, self.M2, self.d2, rvecs, tvecs = self.calibrate_fisheye(allCorners_r, allIds_r, self.img_shape) - # self.fisheye_undistort_visualizaation(images_left, self.M1, self.d1, self.img_shape) - # self.fisheye_undistort_visualizaation(images_right, self.M2, 
self.d2, self.img_shape) - - - print("~~~~~~~~~~~~~RMS error of left~~~~~~~~~~~~~~") - print(ret_l) - print(ret_r) - print(self.M1) - print(self.M2) - print(self.d1) - print(self.d2) - # if self.cameraModel == 'perspective': - ret, self.M1, self.d1, self.M2, self.d2, self.R, self.T, E, F = self.calibrate_stereo(allCorners_l, allIds_l, allCorners_r, allIds_r, self.img_shape, self.M1, self.d1, self.M2, self.d2) - # else: - # ret, self.M1, self.d1, self.M2, self.d2, self.R, self.T = self.calibrate_stereo(allCorners_l, allIds_l, allCorners_r, allIds_r, self.img_shape, self.M1, self.d1, self.M2, self.d2) - print("~~~~~~~~~~~~~RMS error of L-R~~~~~~~~~~~~~~") - print(ret) - """ - left_corners_sampled = [] - right_corners_sampled = [] - obj_pts = [] - one_pts = self.board.chessboardCorners - for i in range(len(allIds_l)): - left_sub_corners = [] - right_sub_corners = [] - obj_pts_sub = [] - # if len(allIds_l[i]) < 70 or len(allIds_r[i]) < 70: - # continue - for j in range(len(allIds_l[i])): - idx = np.where(allIds_r[i] == allIds_l[i][j]) - if idx[0].size == 0: - continue - left_sub_corners.append(allCorners_l[i][j]) - right_sub_corners.append(allCorners_r[i][idx]) - obj_pts_sub.append(one_pts[allIds_l[i][j]]) - - obj_pts.append(np.array(obj_pts_sub, dtype=np.float32)) - left_corners_sampled.append( - np.array(left_sub_corners, dtype=np.float32)) - right_corners_sampled.append( - np.array(right_sub_corners, dtype=np.float32)) - - self.objpoints = obj_pts - self.imgpoints_l = left_corners_sampled - self.imgpoints_r = right_corners_sampled - - flags = 0 - flags |= cv2.CALIB_USE_INTRINSIC_GUESS - flags |= cv2.CALIB_RATIONAL_MODEL - - stereocalib_criteria = (cv2.TERM_CRITERIA_COUNT + - cv2.TERM_CRITERIA_EPS, 100, 1e-5) - - ret, self.M1, self.d1, self.M2, self.d2, self.R, self.T, E, F = cv2.stereoCalibrate( - self.objpoints, self.imgpoints_l, self.imgpoints_r, - self.M1, self.d1, self.M2, self.d2, self.img_shape, - criteria=stereocalib_criteria, flags=flags) - print("<~~ ~~~~~~~~~~~ RMS of stereo ~~~~~~~~~~~ ~~>") - print('RMS error of stereo calibration of left-right is {0}'.format(ret)) """ - - # TODO(sachin): Fix rectify for Fisheye - if self.cameraModel == 'perspective': - - self.R1, self.R2, self.P1, self.P2, self.Q, validPixROI1, validPixROI2 = cv2.stereoRectify( - self.M1, - self.d1, - self.M2, - self.d2, - self.img_shape, self.R, self.T) - else: - self.R1, self.R2, self.P1, self.P2, self.Q = cv2.fisheye.stereoRectify(self.M1, - self.d1, - self.M2, - self.d2, - self.img_shape, self.R, self.T) - - self.H1 = np.matmul(np.matmul(self.M2, self.R1), - np.linalg.inv(self.M1)) - self.H2 = np.matmul(np.matmul(self.M2, self.R2), - np.linalg.inv(self.M2)) + images_right) != 0, "ERROR: Images not found, check directory" + # print('Images from left and right') + # print(images_left[0]) + # print(images_right[0]) + + scale = None + scale_req = False + frame_left_shape = cv2.imread(images_left[0], 0).shape # (h,w) + frame_right_shape = cv2.imread(images_right[0], 0).shape + scalable_res = frame_left_shape + scaled_res = frame_right_shape + # print(f' Frame left shape: {frame_left_shape} and right shape: {frame_right_shape}') + if frame_right_shape[0] < frame_left_shape[0] and frame_right_shape[1] < frame_left_shape[1]: + scale_req = True + scale = frame_right_shape[1] / frame_left_shape[1] + elif frame_right_shape[0] > frame_left_shape[0] and frame_right_shape[1] > frame_left_shape[1]: + scale_req = True + scale = frame_left_shape[1] / frame_right_shape[1] + scalable_res = frame_right_shape + scaled_res = 
frame_left_shape + + if scale_req: + scaled_height = scale * scalable_res[0] + diff = scaled_height - scaled_res[0] + # if scaled_height < smaller_res[0]: + if diff < 0: + scaled_res = (int(scaled_height), scaled_res[1]) + + print( + f'Is scale Req: {scale_req}\n scale value: {scale} \n scalable Res: {scalable_res} \n scale Res: {scaled_res}') + print("Original res Left :{}".format(frame_left_shape)) + print("Original res Right :{}".format(frame_right_shape)) + print("Scale res :{}".format(scaled_res)) + + # scaled_res = (scaled_height, ) + M_lp = self.scale_intrinsics(M_l, frame_left_shape, scaled_res) + M_rp = self.scale_intrinsics(M_r, frame_right_shape, scaled_res) + + # print("~~~~~~~~~~~ POSE ESTIMATION LEFT CAMERA ~~~~~~~~~~~~~") + allCorners_l, allIds_l, _, _, imsize_l, _ = self.analyze_charuco( + images_left, scale_req, scaled_res) + + # print("~~~~~~~~~~~ POSE ESTIMATION RIGHT CAMERA ~~~~~~~~~~~~~") + allCorners_r, allIds_r, _, _, imsize_r, _ = self.analyze_charuco( + images_right, scale_req, scaled_res) + + print(f'Image size of right side (w, h):{imsize_r}') + print(f'Image size of left side (w, h):{imsize_l}') + + assert imsize_r == imsize_l, "Left and right resolution scaling is wrong" + + return self.calibrate_stereo( + allCorners_l, allIds_l, allCorners_r, allIds_r, imsize_r, M_lp, d_l, M_rp, d_r, guess_translation, guess_rotation) + + def scale_intrinsics(self, intrinsics, originalShape, destShape): + scale = destShape[1] / originalShape[1] # scale on width + scale_mat = np.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]]) + scaled_intrinsics = np.matmul(scale_mat, intrinsics) + """ print("Scaled height offset : {}".format( + (originalShape[0] * scale - destShape[0]) / 2)) + print("Scaled width offset : {}".format( + (originalShape[1] * scale - destShape[1]) / 2)) """ + scaled_intrinsics[1][2] -= (originalShape[0] # c_y - along height of the image + * scale - destShape[0]) / 2 + scaled_intrinsics[0][2] -= (originalShape[1] # c_x width of the image + * scale - destShape[1]) / 2 + if traceLevel == 2: + print('original_intrinsics') + print(intrinsics) + print('scaled_intrinsics') + print(scaled_intrinsics) + + return scaled_intrinsics def fisheye_undistort_visualizaation(self, img_list, K, D, img_size): for im in img_list: @@ -413,38 +396,61 @@ def fisheye_undistort_visualizaation(self, img_list, K, D, img_size): img = cv2.imread(im) # h, w = img.shape[:2] if self.cameraModel == 'perspective': + # kScaled, _ = cv2.getOptimalNewCameraMatrix(K, D, img_size, 0) + # print(f'K scaled is \n {kScaled} and size is \n {img_size}') + # print(f'D Value is \n {D}') map1, map2 = cv2.initUndistortRectifyMap( - K, D, np.eye(3), K, img_size, cv2.CV_32FC1) + K, D, np.eye(3), K, img_size, cv2.CV_32FC1) else: - map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, img_size, cv2.CV_32FC1) - - undistorted_img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT) - cv2.imshow("undistorted", undistorted_img) - cv2.waitKey(0) - # cv2.destroyAllWindows() + map1, map2 = cv2.fisheye.initUndistortRectifyMap( + K, D, np.eye(3), K, img_size, cv2.CV_32FC1) + undistorted_img = cv2.remap( + img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT) + cv2.imshow("undistorted", undistorted_img) + if traceLevel == 4: + print(f'image path - {im}') + print(f'Image Undistorted Size {undistorted_img.shape}') + k = cv2.waitKey(0) + if k == 27: # Esc key to stop + break - def calibrate_camera_charuco(self, allCorners, allIds, imsize): + def 
calibrate_camera_charuco(self, allCorners, allIds, imsize, hfov): """ Calibrates the camera using the detected corners. """ + f = imsize[0] / (2 * np.tan(np.deg2rad(hfov/2))) + # TODO(sachin): Change the initialization to be initialized using the guess from fov print("CAMERA CALIBRATION") print(imsize) - if imsize[1] < 1100: + cameraMatrixInit = np.array([[f, 0.0, imsize[0]/2], + [0.0, f, imsize[1]/2], + [0.0, 0.0, 1.0]]) + + # cameraMatrixInit = np.array([[857.1668, 0.0, 643.9126], + # [0.0, 856.0823, 387.56018], + # [0.0, 0.0, 1.0]]) + """ if imsize[1] < 700: + cameraMatrixInit = np.array([[400.0, 0.0, imsize[0]/2], + [0.0, 400.0, imsize[1]/2], + [0.0, 0.0, 1.0]]) + elif imsize[1] < 1100: cameraMatrixInit = np.array([[857.1668, 0.0, 643.9126], [0.0, 856.0823, 387.56018], [0.0, 0.0, 1.0]]) else: cameraMatrixInit = np.array([[3819.8801, 0.0, 1912.8375], [0.0, 3819.8801, 1135.3433], - [0.0, 0.0, 1.]]) - - print("Camera Matrix initialization.............") - print(cameraMatrixInit) + [0.0, 0.0, 1.]]) """ + if traceLevel == 1: + print( + f'Camera Matrix initialization with HFOV of {hfov} is.............') + print(cameraMatrixInit) distCoeffsInit = np.zeros((5, 1)) flags = (cv2.CALIB_USE_INTRINSIC_GUESS + - cv2.CALIB_RATIONAL_MODEL + cv2.CALIB_FIX_ASPECT_RATIO) + cv2.CALIB_RATIONAL_MODEL) + # flags = (cv2.CALIB_RATIONAL_MODEL) (ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, @@ -457,8 +463,10 @@ def calibrate_camera_charuco(self, allCorners, allIds, imsize): cameraMatrix=cameraMatrixInit, distCoeffs=distCoeffsInit, flags=flags, - criteria=(cv2.TERM_CRITERIA_EPS & cv2.TERM_CRITERIA_COUNT, 10000, 1e-9)) - + criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 50000, 1e-9)) + if traceLevel == 2: + print('Per View Errors...') + print(perViewErrors) return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors def calibrate_fisheye(self, allCorners, allIds, imsize): @@ -469,221 +477,247 @@ def calibrate_fisheye(self, allCorners, allIds, imsize): for j in range(len(allIds[i])): obj_pts_sub.append(one_pts[allIds[i][j]]) obj_points.append(np.array(obj_pts_sub, dtype=np.float32)) - cameraMatrixInit = np.array([[500, 0.0, 643.9126], [0.0, 500, 387.56018], [0.0, 0.0, 1.0]]) - + print("Camera Matrix initialization.............") print(cameraMatrixInit) - flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + flags = 0 + # flags |= cv2.fisheye.CALIB_CHECK_COND + # flags |= cv2.fisheye.CALIB_USE_INTRINSIC_GUESS + flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_CHECK_COND + cv2.fisheye.CALIB_FIX_SKEW + distCoeffsInit = np.zeros((4, 1)) term_criteria = (cv2.TERM_CRITERIA_COUNT + - cv2.TERM_CRITERIA_EPS, 100, 1e-5) - - return cv2.fisheye.calibrate(obj_points, allCorners, imsize, cameraMatrixInit, distCoeffsInit, flags = flags, criteria = term_criteria) - - def calibrate_stereo(self, allCorners_l, allIds_l, allCorners_r, allIds_r, imsize, cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r): + cv2.TERM_CRITERIA_EPS, 50000, 1e-9) + + return cv2.fisheye.calibrate(obj_points, allCorners, imsize, cameraMatrixInit, distCoeffsInit, flags=flags, criteria=term_criteria) + + def calibrate_stereo(self, allCorners_l, allIds_l, allCorners_r, allIds_r, imsize, cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, t_in, r_in): left_corners_sampled = [] right_corners_sampled = [] obj_pts = [] one_pts = self.board.chessboardCorners print('allIds_l') print(len(allIds_l)) + print('allIds_r') print(len(allIds_r)) - print('allIds_l') + # 
print('allIds_l') # print(allIds_l) - print('allIds_r') # print(allIds_r) for i in range(len(allIds_l)): left_sub_corners = [] right_sub_corners = [] obj_pts_sub = [] - # if len(allIds_l[i]) < 70 or len(allIds_r[i]) < 70: - # continue + #if len(allIds_l[i]) < 70 or len(allIds_r[i]) < 70: + # continue for j in range(len(allIds_l[i])): + # print(f' allIds_l[i][j] is {allIds_l[i][j]}') + # print(f' allIds_r[i] is {allIds_r[i]}') idx = np.where(allIds_r[i] == allIds_l[i][j]) if idx[0].size == 0: continue left_sub_corners.append(allCorners_l[i][j]) right_sub_corners.append(allCorners_r[i][idx]) obj_pts_sub.append(one_pts[allIds_l[i][j]]) - - obj_pts.append(np.array(obj_pts_sub, dtype=np.float32)) - left_corners_sampled.append( - np.array(left_sub_corners, dtype=np.float32)) - right_corners_sampled.append( - np.array(right_sub_corners, dtype=np.float32)) + if len(left_sub_corners) > 3 and len(right_sub_corners) > 3: + obj_pts.append(np.array(obj_pts_sub, dtype=np.float32)) + left_corners_sampled.append( + np.array(left_sub_corners, dtype=np.float32)) + right_corners_sampled.append( + np.array(right_sub_corners, dtype=np.float32)) + else: + return -1, "Stereo calibration failed due to too few common features" stereocalib_criteria = (cv2.TERM_CRITERIA_COUNT + - cv2.TERM_CRITERIA_EPS, 100, 1e-5) + cv2.TERM_CRITERIA_EPS, 50000, 1e-9) if self.cameraModel == 'perspective': flags = 0 - flags |= cv2.CALIB_USE_INTRINSIC_GUESS # TODO(sACHIN): Try without intrinsic guess + flags |= cv2.CALIB_FIX_INTRINSIC + # flags |= cv2.CALIB_USE_INTRINSIC_GUESS flags |= cv2.CALIB_RATIONAL_MODEL + # print(flags) + if traceLevel == 1: + print('Printing Extrinsics guesses...') + print(r_in) + print(t_in) + if 1: + ret, M1, d1, M2, d2, R, T, E, F, _ = cv2.stereoCalibrateExtended( + obj_pts, left_corners_sampled, right_corners_sampled, + cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, imsize, + R=r_in, T=t_in, criteria=stereocalib_criteria, flags=flags) + else: + ret, cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, R, T, E, F, _ = cv2.stereoCalibrateExtended( + obj_pts, left_corners_sampled, right_corners_sampled, + cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, imsize, + R=r_in, T=t_in, criteria=stereocalib_criteria, flags=flags) + + print(f'Reprojection error is {ret}') + print('Printing Extrinsics res...') + print(R) + print(T) + r_euler = Rotation.from_matrix(R).as_euler('xyz', degrees=True) + print(f'Euler angles in XYZ {r_euler}') + + + # ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate( + # obj_pts, left_corners_sampled, right_corners_sampled, + # cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, imsize, + # criteria=stereocalib_criteria, flags=flags) + # if np.absolute(T[1]) > 0.2: + print(f'Image size---: {imsize}') + + R_l, R_r, P_l, P_r, Q, validPixROI1, validPixROI2 = cv2.stereoRectify( + cameraMatrix_l, + distCoeff_l, + cameraMatrix_r, + distCoeff_r, + imsize, R, T) # , alpha=0.1 + # self.P_l = P_l + # self.P_r = P_r + r_euler = Rotation.from_matrix(R_l).as_euler('xyz', degrees=True) + print(f'R_L Euler angles in XYZ {r_euler}') + r_euler = Rotation.from_matrix(R_r).as_euler('xyz', degrees=True) + print(f'R_R Euler angles in XYZ {r_euler}') + + # print(f'P_l is \n {P_l}') + # print(f'P_r is \n {P_r}') - return cv2.stereoCalibrate( - obj_pts, left_corners_sampled, right_corners_sampled, - cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, imsize, - criteria=stereocalib_criteria, flags=flags) + return [ret, R, T, R_l, R_r, P_l, P_r] elif self.cameraModel == 'fisheye': -
print(len(obj_pts)) - print('obj_pts') - # print(obj_pts) - print(len(left_corners_sampled)) - print('left_corners_sampled') - # print(left_corners_sampled) - print(len(right_corners_sampled)) - print('right_corners_sampled') - # print(right_corners_sampled) - for i in range(len(obj_pts)): - print('---------------------') - print(i) - print(len(obj_pts[i])) - print(len(left_corners_sampled[i])) - print(len(right_corners_sampled[i])) flags = 0 - # flags |= cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC # TODO(sACHIN): Try without intrinsic guess - return cv2.fisheye.stereoCalibrate( + flags |= cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + flags |= cv2.fisheye.CALIB_CHECK_COND + flags |= cv2.fisheye.CALIB_FIX_SKEW + # flags |= cv2.CALIB_FIX_INTRINSIC + # flags |= cv2.CALIB_RATIONAL_MODEL + # TODO(sACHIN): Try without intrinsic guess + # flags |= cv2.CALIB_USE_INTRINSIC_GUESS + # TODO(sACHIN): Try without intrinsic guess + # flags |= cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + # flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_CHECK_COND + cv2.fisheye.CALIB_FIX_SKEW + if traceLevel == 3: + print('Fisheye stereo model..................') + print(len(left_corners_sampled)) + for i in range(0, len(left_corners_sampled)): + print(f'{len(left_corners_sampled[i])} -- {len(obj_pts[i])}') + print('Right corner samples..................') + print(len(right_corners_sampled)) + for i in range(0, len(right_corners_sampled)): + print(f'{len(right_corners_sampled[i])} -- {len(obj_pts[i])}') + del obj_pts[4] + del right_corners_sampled[4] + del left_corners_sampled[4] + ret, M1, d1, M2, d2, R, T, E, F = cv2.fisheye.stereoCalibrate( obj_pts, left_corners_sampled, right_corners_sampled, cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, imsize, flags=flags, criteria=stereocalib_criteria), None, None - - def rgb_calibrate(self, filepath): - images_right = glob.glob(filepath + "/right/*") - images_rgb = glob.glob(filepath + "/rgb/*") - - images_rgb_pth = Path(filepath + "/rgb") - if not images_rgb_pth.exists(): - raise RuntimeError("RGB dataset folder not found!! 
To skip rgb calibration use -drgb argument") + R_l, R_r, P_l, P_r, Q = cv2.fisheye.stereoRectify( + cameraMatrix_l, + distCoeff_l, + cameraMatrix_r, + distCoeff_r, + imsize, R, T) + + return [ret, R, T, R_l, R_r] - images_right.sort() - images_rgb.sort() - - allCorners_rgb_scaled, allIds_rgb_scaled, _, _, imsize_rgb_scaled, _ = self.analyze_charuco( - images_rgb, scale_req=True, req_resolution=(720, 1280)) - self.img_shape_rgb_scaled = imsize_rgb_scaled[::-1] - - ret_rgb_scaled, self.M3_scaled, self.d3_scaled, rvecs, tvecs = self.calibrate_camera_charuco( - allCorners_rgb_scaled, allIds_rgb_scaled, imsize_rgb_scaled[::-1]) - - allCorners_r_rgb, allIds_r_rgb, _, _, _, _ = self.analyze_charuco( - images_right, scale_req=True, req_resolution=(720, 1280)) - - print("RGB callleded RMS at 720") - print(ret_rgb_scaled) - print(imsize_rgb_scaled) - print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') - # print(self.M3_scaled) - - # sampling common detected corners - rgb_scaled_rgb_corners_sampled = [] - rgb_scaled_right_corners_sampled = [] - rgb_scaled_obj_pts = [] - rgb_pts = None - right_pts = None - one_pts = self.board.chessboardCorners - for i in range(len(allIds_rgb_scaled)): - rgb_sub_corners = [] - right_sub_corners = [] - obj_pts_sub = [] - # if len(allIds_l[i]) < 70 or len(allIds_r[i]) < 70: - # continue - for j in range(len(allIds_rgb_scaled[i])): - idx = np.where(allIds_r_rgb[i] == allIds_rgb_scaled[i][j]) - if idx[0].size == 0: - continue - rgb_sub_corners.append(allCorners_rgb_scaled[i][j]) - right_sub_corners.append(allCorners_r_rgb[i][idx]) - obj_pts_sub.append(one_pts[allIds_rgb_scaled[i][j]]) - - rgb_scaled_obj_pts.append( - np.array(obj_pts_sub, dtype=np.float32)) - rgb_scaled_rgb_corners_sampled.append( - np.array(rgb_sub_corners, dtype=np.float32)) - rgb_scaled_right_corners_sampled.append( - np.array(right_sub_corners, dtype=np.float32)) - if rgb_pts is None: - rgb_pts = np.array(rgb_sub_corners, dtype=np.float32) - right_pts = np.array(right_sub_corners, dtype=np.float32) + def display_rectification(self, image_data_pairs, isHorizontal): + print( + "Displaying Stereo Pair for visual inspection. Press the [ESC] key to exit.") + for image_data_pair in image_data_pairs: + if isHorizontal: + img_concat = cv2.hconcat( + [image_data_pair[0], image_data_pair[1]]) else: - np.vstack( - (rgb_pts, np.array(rgb_sub_corners, dtype=np.float32))) - np.vstack((right_pts, np.array( - right_sub_corners, dtype=np.float32))) - - self.objpoints_rgb_r = rgb_scaled_obj_pts - self.imgpoints_rgb = rgb_scaled_rgb_corners_sampled - self.imgpoints_rgb_right = rgb_scaled_right_corners_sampled - - flags = 0 - flags |= cv2.CALIB_FIX_INTRINSIC - flags |= cv2.CALIB_RATIONAL_MODEL + img_concat = cv2.vconcat( + [image_data_pair[0], image_data_pair[1]]) + line_row = 0 + while isHorizontal and line_row < img_concat.shape[0]: + cv2.line(img_concat, + (0, line_row), (img_concat.shape[1], line_row), + (0, 255, 0), 1) + line_row += 30 - stereocalib_criteria = (cv2.TERM_CRITERIA_COUNT + - cv2.TERM_CRITERIA_EPS, 100, 1e-5) + line_col = 0 + while not isHorizontal and line_col < img_concat.shape[1]: + cv2.line(img_concat, + (line_col, 0), (line_col, img_concat.shape[0]), + (0, 255, 0), 1) + line_col += 40 - # print(M_RGB) - print('vs. 
intrinisics computed after scaling the image --->') - # self.M3, self.d3 - scale = 1920/1280 - print(scale) - scale_mat = np.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]]) - self.M3 = np.matmul(scale_mat, self.M3_scaled) - self.d3 = self.d3_scaled - print(self.M3_scaled) - print(self.M3) - - self.M2_rgb = np.copy(self.M2) - self.M2_rgb[1, 2] -= 40 - self.d2_rgb = np.copy(self.d1) - - ret, _, _, _, _, self.R_rgb, self.T_rgb, E, F = cv2.stereoCalibrate( - self.objpoints_rgb_r, self.imgpoints_rgb, self.imgpoints_rgb_right, - self.M3_scaled, self.d3_scaled, self.M2_rgb, self.d2_rgb, self.img_shape_rgb_scaled, - criteria=stereocalib_criteria, flags=flags) - print("~~~~~~ Stereo calibration rgb-left RMS error ~~~~~~~~") - print(ret) - - # Rectification is only to test the epipolar - self.R1_rgb, self.R2_rgb, self.P1_rgb, self.P2_rgb, self.Q_rgb, validPixROI1, validPixROI2 = cv2.stereoRectify( - self.M3_scaled, - self.d3_scaled, - self.M2_rgb, - self.d2_rgb, - self.img_shape_rgb_scaled, self.R_rgb, self.T_rgb) - - def test_epipolar_charuco_lr(self, dataset_dir): - print("<-----------------Epipolar error of LEFT-right camera---------------->") - images_left = glob.glob(dataset_dir + '/left/*.png') - images_right = glob.glob(dataset_dir + '/right/*.png') - images_left.sort() - images_right.sort() - print("HU IHER") - assert len(images_left) != 0, "ERROR: Images not read correctly" - assert len(images_right) != 0, "ERROR: Images not read correctly" - criteria = (cv2.TERM_CRITERIA_EPS + - cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001) + img_concat = cv2.resize( + img_concat, (0, 0), fx=0.4, fy=0.4) + + # show image + cv2.imshow('Stereo Pair', img_concat) + k = cv2.waitKey(0) + if k == 27: # Esc key to stop + break - # if not use_homo: + # os._exit(0) + # raise SystemExit() + + cv2.destroyWindow('Stereo Pair') + + def scale_image(self, img, scaled_res): + expected_height = img.shape[0]*(scaled_res[1]/img.shape[1]) + # print("Expected Height: {}".format(expected_height)) + + if not (img.shape[0] == scaled_res[0] and img.shape[1] == scaled_res[1]): + if int(expected_height) == scaled_res[0]: + # resizing to have both stereo and rgb to have same + # resolution to capture extrinsics of the rgb-right camera + img = cv2.resize(img, (scaled_res[1], scaled_res[0]), + interpolation=cv2.INTER_CUBIC) + return img + else: + # resizing and cropping to have both stereo and rgb to have same resolution + # to calculate extrinsics of the rgb-right camera + scale_width = scaled_res[1]/img.shape[1] + dest_res = ( + int(img.shape[1] * scale_width), int(img.shape[0] * scale_width)) + img = cv2.resize( + img, dest_res, interpolation=cv2.INTER_CUBIC) + if img.shape[0] < scaled_res[0]: + raise RuntimeError("resized height of rgb is smaller than required. 
{0} < {1}".format( + img.shape[0], scaled_res[0])) + # print(gray.shape[0] - req_resolution[0]) + del_height = (img.shape[0] - scaled_res[0]) // 2 + # gray = gray[: req_resolution[0], :] + img = img[del_height: del_height + scaled_res[0], :] + return img + else: + return img + + def sgdEpipolar(self, images_left, images_right, M_lp, d_l, M_rp, d_r, r_l, r_r, kScaledL, kScaledR, scaled_res, isHorizontal): mapx_l, mapy_l = cv2.initUndistortRectifyMap( - self.M1, self.d1, self.R1, self.P1, self.img_shape, cv2.CV_32FC1) + M_lp, d_l, r_l, kScaledL, scaled_res[::-1], cv2.CV_32FC1) mapx_r, mapy_r = cv2.initUndistortRectifyMap( - self.M2, self.d2, self.R2, self.P2, self.img_shape, cv2.CV_32FC1) - print("Printing p1 and p2") - print(self.P1) - print(self.P2) + M_rp, d_r, r_r, kScaledR, scaled_res[::-1], cv2.CV_32FC1) + + image_data_pairs = [] + imagesCount = 0 + # print(len(images_left)) + # print(len(images_right)) for image_left, image_right in zip(images_left, images_right): # read images + imagesCount += 1 + # print(imagesCount) img_l = cv2.imread(image_left, 0) img_r = cv2.imread(image_right, 0) - # warp right image + img_l = self.scale_image(img_l, scaled_res) + img_r = self.scale_image(img_r, scaled_res) + # print(img_l.shape) + # print(img_r.shape) + + # warp right image # img_l = cv2.warpPerspective(img_l, self.H1, img_l.shape[::-1], # cv2.INTER_CUBIC + # cv2.WARP_FILL_OUTLIERS + @@ -698,12 +732,13 @@ def test_epipolar_charuco_lr(self, dataset_dir): img_r = cv2.remap(img_r, mapx_r, mapy_r, cv2.INTER_LINEAR) image_data_pairs.append((img_l, img_r)) - - # compute metrics + # print(f'Images data pair size ios {len(image_data_pairs)}') imgpoints_r = [] imgpoints_l = [] + criteria = (cv2.TERM_CRITERIA_EPS + + cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.00001) + for i, image_data_pair in enumerate(image_data_pairs): - # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) marker_corners_l, ids_l, rejectedImgPoints = cv2.aruco.detectMarkers( image_data_pair[0], self.aruco_dictionary) marker_corners_l, ids_l, _, _ = cv2.aruco.refineDetectedMarkers(image_data_pair[0], self.board, @@ -720,7 +755,22 @@ def test_epipolar_charuco_lr(self, dataset_dir): marker_corners_l, ids_l, image_data_pair[0], self.board) res2_r = cv2.aruco.interpolateCornersCharuco( marker_corners_r, ids_r, image_data_pair[1], self.board) - if res2_l[1] is not None and res2_l[2] is not None and len(res2_l[1]) > 3: + + # img_concat = cv2.hconcat([image_data_pair[0], image_data_pair[1]]) + # img_concat = cv2.cvtColor(img_concat, cv2.COLOR_GRAY2RGB) + # line_row = 0 + # while line_row < img_concat.shape[0]: + # cv2.line(img_concat, + # (0, line_row), (img_concat.shape[1], line_row), + # (0, 255, 0), 1) + # line_row += 30 + + # cv2.imshow('Stereo Pair', img_concat) + # k = cv2.waitKey(0) + # if k == 27: # Esc key to stop + # break + + if res2_l[1] is not None and res2_r[2] is not None and len(res2_l[1]) > 3 and len(res2_r[1]) > 3: cv2.cornerSubPix(image_data_pair[0], res2_l[1], winSize=(5, 5), @@ -731,117 +781,169 @@ def test_epipolar_charuco_lr(self, dataset_dir): zeroZone=(-1, -1), criteria=criteria) - # termination criteria - img_pth = Path(images_right[i]) - name = img_pth.name - print("Image name {}".format(name)) - corners_l = [] - corners_r = [] - for j in range(len(res2_l[2])): - idx = np.where(res2_r[2] == res2_l[2][j]) - if idx[0].size == 0: - continue - corners_l.append(res2_l[1][j]) - corners_r.append(res2_r[1][idx]) -# obj_pts_sub.append(one_pts[allIds_l[i][j]]) - -# obj_pts.append(np.array(obj_pts_sub, dtype=np.float32)) -# 
left_sub_corners_sampled.append(np.array(left_sub_corners, dtype=np.float32)) -# right_sub_corners_sampled.append(np.array(right_sub_corners, dtype=np.float32)) - - imgpoints_l.extend(corners_l) - imgpoints_r.extend(corners_r) - epi_error_sum = 0 - for l_pt, r_pt in zip(corners_l, corners_r): - epi_error_sum += abs(l_pt[0][1] - r_pt[0][1]) - - print("Average Epipolar Error per image on host in " + img_pth.name + " : " + - str(epi_error_sum / len(corners_l))) + # termination criteria + img_pth_right = Path(images_right[i]) + img_pth_left = Path(images_left[i]) + org = (100, 50) + # cv2.imshow('ltext', lText) + # cv2.waitKey(0) + localError = 0 + corners_l = [] + corners_r = [] + for j in range(len(res2_l[2])): + idx = np.where(res2_r[2] == res2_l[2][j]) + if idx[0].size == 0: + continue + corners_l.append(res2_l[1][j]) + corners_r.append(res2_r[1][idx]) + + imgpoints_l.extend(corners_l) + imgpoints_r.extend(corners_r) + epi_error_sum = 0 + for l_pt, r_pt in zip(corners_l, corners_r): + if isHorizontal: + epi_error_sum += abs(l_pt[0][1] - r_pt[0][1]) + else: + epi_error_sum += abs(l_pt[0][0] - r_pt[0][0]) + # localError = epi_error_sum / len(corners_l) + + # print("Average Epipolar in test Error per image on host in " + img_pth_right.name + " : " + + # str(localError)) + else: + print('Number of corners in left -> {} and right -> {}'.format( + len(marker_corners_l), len(marker_corners_r))) + raise SystemExit(1) epi_error_sum = 0 for l_pt, r_pt in zip(imgpoints_l, imgpoints_r): - epi_error_sum += abs(l_pt[0][1] - r_pt[0][1]) + if isHorizontal: + epi_error_sum += abs(l_pt[0][1] - r_pt[0][1]) + else: + epi_error_sum += abs(l_pt[0][0] - r_pt[0][0]) avg_epipolar = epi_error_sum / len(imgpoints_r) - print("Average Epipolar Error: " + str(avg_epipolar)) - - if self.enable_rectification_disp: - self.display_rectification(image_data_pairs) - + print("Average Epipolar Error in test is: " + str(avg_epipolar)) return avg_epipolar - def test_epipolar_charuco_rgbr(self, dataset_dir): - images_rgb = glob.glob(dataset_dir + '/rgb/*.png') - images_right = glob.glob(dataset_dir + '/right/*.png') - images_rgb.sort() + + def test_epipolar_charuco(self, left_img_pth, right_img_pth, M_l, d_l, M_r, d_r, t, r_l, r_r, p_l, p_r): + images_left = glob.glob(left_img_pth + '/*.png') + images_right = glob.glob(right_img_pth + '/*.png') + images_left.sort() images_right.sort() - print("<-----------------Epipolar error of rgb-right camera---------------->") - assert len(images_rgb) != 0, "ERROR: Images not read correctly" + assert len(images_left) != 0, "ERROR: Images not read correctly" assert len(images_right) != 0, "ERROR: Images not read correctly" - # criteria for marker detection/corner detections - criteria = (cv2.TERM_CRITERIA_EPS + - cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001) - scale_width = 1280/self.img_shape_rgb_scaled[0] - print('scaled using {0}'.format(self.img_shape_rgb_scaled[0])) + isHorizontal = True + if np.absolute(t[1]) > 0.6: + isHorizontal = False + + scale = None + scale_req = False + frame_left_shape = cv2.imread(images_left[0], 0).shape + frame_right_shape = cv2.imread(images_right[0], 0).shape + scalable_res = frame_left_shape + scaled_res = frame_right_shape + if frame_right_shape[0] < frame_left_shape[0] and frame_right_shape[1] < frame_left_shape[1]: + scale_req = True + scale = frame_right_shape[1] / frame_left_shape[1] + elif frame_right_shape[0] > frame_left_shape[0] and frame_right_shape[1] > frame_left_shape[1]: + scale_req = True + scale = frame_left_shape[1] / frame_right_shape[1] + 
scalable_res = frame_right_shape + scaled_res = frame_left_shape + + if scale_req: + scaled_height = scale * scalable_res[0] + diff = scaled_height - scaled_res[0] + # if scaled_height < smaller_res[0]: + if diff < 0: + scaled_res = (int(scaled_height), scaled_res[1]) + + print( + f'Is scale Req: {scale_req}\n scale value: {scale} \n scalable Res: {scalable_res} \n scale Res: {scaled_res}') + print("Original res Left :{}".format(frame_left_shape)) + print("Original res Right :{}".format(frame_right_shape)) + # print("Scale res :{}".format(scaled_res)) + + M_lp = self.scale_intrinsics(M_l, frame_left_shape, scaled_res) + M_rp = self.scale_intrinsics(M_r, frame_right_shape, scaled_res) + + # p_lp = self.scale_intrinsics(p_l, frame_left_shape, scaled_res) + # p_rp = self.scale_intrinsics(p_r, frame_right_shape, scaled_res) - # if not use_homo: - mapx_rgb, mapy_rgb = cv2.initUndistortRectifyMap( - self.M3_scaled, self.d3_scaled, self.R1_rgb, self.M3_scaled, self.img_shape_rgb_scaled, cv2.CV_32FC1) + criteria = (cv2.TERM_CRITERIA_EPS + + cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.00001) + + # TODO(Sachin): Observe Images by adding visualization + # TODO(Sachin): Check if the stretch is only in calibration Images + print('Original intrinsics ....') + print(M_lp) + # print(d_l) + print(M_rp) + # print(d_r) + + print(f'Width and height is {scaled_res[::-1]}') + # print(d_r) + # kScaledL, _ = cv2.getOptimalNewCameraMatrix(M_l, d_l, scaled_res[::-1], 0) + # kScaledR, _ = cv2.getOptimalNewCameraMatrix(M_r, d_r, scaled_res[::-1], 0) + kScaledL = M_rp + kScaledR = kScaledL + # print('Intrinsics from the getOptimalNewCameraMatrix....') + # print(kScaledL) + # print(kScaledR) + # oldEpipolarError = None + # epQueue = deque() + # movePos = True + + r_id = np.identity(3) + mapx_l, mapy_l = cv2.initUndistortRectifyMap( + M_lp, d_l, r_l, M_rp, scaled_res[::-1], cv2.CV_32FC1) mapx_r, mapy_r = cv2.initUndistortRectifyMap( - self.M2_rgb, self.d2_rgb, self.R2_rgb, self.M3_scaled, self.img_shape_rgb_scaled, cv2.CV_32FC1) - - # self.H1_rgb = np.matmul(np.matmul(self.M2, self.R1_rgb), - # np.linalg.inv(M_rgb)) - # self.H2_r = np.matmul(np.matmul(self.M2, self.R2_rgb), - # np.linalg.inv(self.M2)) + M_rp, d_r, r_r, M_rp, scaled_res[::-1], cv2.CV_32FC1) + image_data_pairs = [] - count = 0 - for image_rgb, image_right in zip(images_rgb, images_right): + for image_left, image_right in zip(images_left, images_right): # read images - img_rgb = cv2.imread(image_rgb, 0) + img_l = cv2.imread(image_left, 0) img_r = cv2.imread(image_right, 0) - img_r = img_r[40: 760, :] - - dest_res = (int(img_rgb.shape[1] * scale_width), - int(img_rgb.shape[0] * scale_width)) - # print("RGB size ....") - # print(img_rgb.shape) - # print(dest_res) - - if img_rgb.shape[0] < 720: - raise RuntimeError("resizeed height of rgb is smaller than required. 
{0} < {1}".format( - img_rgb.shape[0], req_resolution[0])) - del_height = (img_rgb.shape[0] - 720) // 2 - # print("del height ??") - # print(del_height) - img_rgb = img_rgb[del_height: del_height + 720, :] - # print("resized_shape") - # print(img_rgb.shape) - # self.parse_frame(img_rgb, "rectified_rgb_before", - # "rectified_"+str(count)) - # warp right image + img_l = self.scale_image(img_l, scaled_res) + img_r = self.scale_image(img_r, scaled_res) + # print(img_l.shape) + # print(img_r.shape) - # img_rgb = cv2.warpPerspective(img_rgb, self.H1_rgb, img_rgb.shape[::-1], + # warp right image + # img_l = cv2.warpPerspective(img_l, self.H1, img_l.shape[::-1], # cv2.INTER_CUBIC + # cv2.WARP_FILL_OUTLIERS + # cv2.WARP_INVERSE_MAP) - # img_r = cv2.warpPerspective(img_r, self.H2_r, img_r.shape[::-1], + # img_r = cv2.warpPerspective(img_r, self.H2, img_r.shape[::-1], # cv2.INTER_CUBIC + # cv2.WARP_FILL_OUTLIERS + # cv2.WARP_INVERSE_MAP) - img_rgb = cv2.remap(img_rgb, mapx_rgb, mapy_rgb, cv2.INTER_LINEAR) + img_l = cv2.remap(img_l, mapx_l, mapy_l, cv2.INTER_LINEAR) img_r = cv2.remap(img_r, mapx_r, mapy_r, cv2.INTER_LINEAR) - # self.parse_frame(img_rgb, "rectified_rgb", "rectified_"+str(count)) - image_data_pairs.append((img_rgb, img_r)) - count += 1 + image_data_pairs.append((img_l, img_r)) + + if traceLevel == 3: + cv2.imshow("undistorted-Left", img_l) + cv2.imshow("undistorted-right", img_r) + # print(f'image path - {im}') + # print(f'Image Undistorted Size {undistorted_img.shape}') + k = cv2.waitKey(0) + if k == 27: # Esc key to stop + break + if traceLevel == 3: + cv2.destroyWindow("undistorted-left") + cv2.destroyWindow("undistorted-right") # compute metrics imgpoints_r = [] imgpoints_l = [] + # new_imagePairs = [] for i, image_data_pair in enumerate(image_data_pairs): # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) marker_corners_l, ids_l, rejectedImgPoints = cv2.aruco.detectMarkers( @@ -855,12 +957,28 @@ def test_epipolar_charuco_rgbr(self, dataset_dir): marker_corners_r, ids_r, _, _ = cv2.aruco.refineDetectedMarkers(image_data_pair[1], self.board, marker_corners_r, ids_r, rejectedCorners=rejectedImgPoints) - + print(f'Marekrs length r is {len(marker_corners_r)}') + print(f'Marekrs length l is {len(marker_corners_l)}') res2_l = cv2.aruco.interpolateCornersCharuco( marker_corners_l, ids_l, image_data_pair[0], self.board) res2_r = cv2.aruco.interpolateCornersCharuco( marker_corners_r, ids_r, image_data_pair[1], self.board) - if res2_l[1] is not None and res2_l[2] is not None and len(res2_l[1]) > 3: + + img_concat = cv2.hconcat([image_data_pair[0], image_data_pair[1]]) + img_concat = cv2.cvtColor(img_concat, cv2.COLOR_GRAY2RGB) + line_row = 0 + while line_row < img_concat.shape[0]: + cv2.line(img_concat, + (0, line_row), (img_concat.shape[1], line_row), + (0, 255, 0), 1) + line_row += 30 + + # cv2.imshow('Stereo Pair', img_concat) + # k = cv2.waitKey(0) + # if k == 27: # Esc key to stop + # break + + if res2_l[1] is not None and res2_r[2] is not None and len(res2_l[1]) > 3 and len(res2_r[1]) > 3: cv2.cornerSubPix(image_data_pair[0], res2_l[1], winSize=(5, 5), @@ -871,107 +989,81 @@ def test_epipolar_charuco_rgbr(self, dataset_dir): zeroZone=(-1, -1), criteria=criteria) - # termination criteria - corners_l = [] - corners_r = [] - for j in range(len(res2_l[2])): - idx = np.where(res2_r[2] == res2_l[2][j]) - if idx[0].size == 0: - continue - corners_l.append(res2_l[1][j]) - corners_r.append(res2_r[1][idx]) + # termination criteria + img_pth_right = Path(images_right[i]) + img_pth_left = 
Path(images_left[i]) + org = (100, 50) + # cv2.imshow('ltext', lText) + # cv2.waitKey(0) + localError = 0 + corners_l = [] + corners_r = [] + for j in range(len(res2_l[2])): + idx = np.where(res2_r[2] == res2_l[2][j]) + if idx[0].size == 0: + continue + corners_l.append(res2_l[1][j]) + corners_r.append(res2_r[1][idx]) + + imgpoints_l.extend(corners_l) + imgpoints_r.extend(corners_r) + epi_error_sum = 0 + for l_pt, r_pt in zip(corners_l, corners_r): + if isHorizontal: + epi_error_sum += abs(l_pt[0][1] - r_pt[0][1]) + else: + epi_error_sum += abs(l_pt[0][0] - r_pt[0][0]) + localError = epi_error_sum / len(corners_l) + + print("Average Epipolar Error per image on host in " + img_pth_right.name + " : " + + str(localError)) + else: + print('Number of corners in left -> {} and right -> {}'.format( + len(marker_corners_l), len(marker_corners_r))) + return -1 + lText = cv2.putText(cv2.cvtColor(image_data_pair[0],cv2.COLOR_GRAY2RGB), img_pth_left.name, org, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA) + rText = cv2.putText(cv2.cvtColor(image_data_pair[1],cv2.COLOR_GRAY2RGB), img_pth_right.name + " Error: " + str(localError), org, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA) + image_data_pairs[i] = (lText, rText) + - imgpoints_l.extend(corners_l) - imgpoints_r.extend(corners_r) - epi_error_sum = 0 - for l_pt, r_pt in zip(corners_l, corners_r): - epi_error_sum += abs(l_pt[0][1] - r_pt[0][1]) - img_pth = Path(images_right[i]) - # name = img_pth.name - print("Average Epipolar Error per image on host in " + img_pth.name + " : " + - str(epi_error_sum / len(corners_l))) - epi_error_sum = 0 for l_pt, r_pt in zip(imgpoints_l, imgpoints_r): - epi_error_sum += abs(l_pt[0][1] - r_pt[0][1]) + if isHorizontal: + epi_error_sum += abs(l_pt[0][1] - r_pt[0][1]) + else: + epi_error_sum += abs(l_pt[0][0] - r_pt[0][0]) avg_epipolar = epi_error_sum / len(imgpoints_r) - print("Average Epipolar Error of rgb_right: " + str(avg_epipolar)) + print("Average Epipolar Error is: " + str(avg_epipolar)) if self.enable_rectification_disp: - self.display_rectification(image_data_pairs) + self.display_rectification(image_data_pairs, isHorizontal) return avg_epipolar - def display_rectification(self, image_data_pairs): - print("Displaying Stereo Pair for visual inspection. Press the [ESC] key to exit.") - for image_data_pair in image_data_pairs: - img_concat = cv2.hconcat([image_data_pair[0], image_data_pair[1]]) - img_concat = cv2.cvtColor(img_concat, cv2.COLOR_GRAY2RGB) - - # draw epipolar lines for debug purposes - line_row = 0 - while line_row < img_concat.shape[0]: - cv2.line(img_concat, - (0, line_row), (img_concat.shape[1], line_row), - (0, 255, 0), 1) - line_row += 30 - - # show image - cv2.imshow('Stereo Pair', img_concat) - k = cv2.waitKey(0) - if k == 27: # Esc key to stop - break - - # os._exit(0) - # raise SystemExit() - - cv2.destroyWindow('Stereo Pair') - - def display_rectification(self, image_data_pairs): - print("Displaying Stereo Pair for visual inspection. 
Press the [ESC] key to exit.") - for image_data_pair in image_data_pairs: - img_concat = cv2.hconcat([image_data_pair[0], image_data_pair[1]]) - img_concat = cv2.cvtColor(img_concat, cv2.COLOR_GRAY2RGB) - - # draw epipolar lines for debug purposes - line_row = 0 - while line_row < img_concat.shape[0]: - cv2.line(img_concat, - (0, line_row), (img_concat.shape[1], line_row), - (0, 255, 0), 1) - line_row += 30 - - # show image - cv2.imshow('Stereo Pair', img_concat) - k = cv2.waitKey(0) - if k == 27: # Esc key to stop - break - - # os._exit(0) - # raise SystemExit() - - cv2.destroyWindow('Stereo Pair') - - def create_save_mesh(self): #, output_path): + def create_save_mesh(self): # , output_path): curr_path = Path(__file__).parent.resolve() print("Mesh path") print(curr_path) - map_x_l, map_y_l = cv2.initUndistortRectifyMap(self.M1, self.d1, self.R1, self.M2, self.img_shape, cv2.CV_32FC1) - map_x_r, map_y_r = cv2.initUndistortRectifyMap(self.M2, self.d2, self.R2, self.M2, self.img_shape, cv2.CV_32FC1) + map_x_l, map_y_l = cv2.initUndistortRectifyMap( + self.M1, self.d1, self.R1, self.M2, self.img_shape, cv2.CV_32FC1) + map_x_r, map_y_r = cv2.initUndistortRectifyMap( + self.M2, self.d2, self.R2, self.M2, self.img_shape, cv2.CV_32FC1) + """ map_x_l_fp32 = map_x_l.astype(np.float32) map_y_l_fp32 = map_y_l.astype(np.float32) map_x_r_fp32 = map_x_r.astype(np.float32) map_y_r_fp32 = map_y_r.astype(np.float32) + print("shape of maps") print(map_x_l.shape) print(map_y_l.shape) print(map_x_r.shape) - print(map_y_r.shape) + print(map_y_r.shape) """ meshCellSize = 16 mesh_left = [] @@ -1004,14 +1096,14 @@ def create_save_mesh(self): #, output_path): row_right.append(map_y_r[y, x]) row_right.append(map_x_r[y, x]) if (map_x_l.shape[1] % meshCellSize) % 2 != 0: - row_left.append(0) - row_left.append(0) - row_right.append(0) - row_right.append(0) + row_left.append(0) + row_left.append(0) + row_right.append(0) + row_right.append(0) mesh_left.append(row_left) - mesh_right.append(row_right) - + mesh_right.append(row_right) + mesh_left = np.array(mesh_left) mesh_right = np.array(mesh_right) left_mesh_fpath = str(curr_path) + '/../resources/left_mesh.calib' diff --git a/resources/boards/ACME01.json b/resources/boards/ACME01.json deleted file mode 100644 index e94758047..000000000 --- a/resources/boards/ACME01.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "board_config": - { - "name": "ACME01", - "revision": "V1.2", - "swap_left_and_right_cameras": false, - "left_fov_deg": 71.86, - "rgb_fov_deg": 68.7938, - "left_to_right_distance_cm": 7.5, - "left_to_rgb_distance_cm": 3.75 - } -} - diff --git a/resources/boards/AR0234DEV.json b/resources/boards/AR0234DEV.json new file mode 100644 index 000000000..47825dfee --- /dev/null +++ b/resources/boards/AR0234DEV.json @@ -0,0 +1,56 @@ +{ + "board_config": + { + "name": "AR0234DEV", + "revision": "R1M0E1", + "cameras":{ + "CAM_C": { + "name": "right", + "hfov": 140, + "type": "color", + "extrinsics": { + "to_cam": "CAM_B", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_B": { + "name": "left", + "hfov": 140, + "type": "color", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 0, + "y": -3.5, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_A": { + "name": "below", + "hfov": 140, + "type": "color" + } + + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } + } +} + diff --git a/resources/boards/BW1097.json b/resources/boards/BW1097.json 
index 16c1546ca..b7d6723b6 100644 --- a/resources/boards/BW1097.json +++ b/resources/boards/BW1097.json @@ -3,11 +3,52 @@ { "name": "OAK-D-CM3", "revision": "R2M2E3", - "swap_left_and_right_cameras": true, - "left_fov_deg": 71.86, - "rgb_fov_deg": 68.7938, - "left_to_right_distance_cm": 9.0, - "left_to_rgb_distance_cm": 2.0 + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -9.0, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 7, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } } } - diff --git a/resources/boards/BW1098OBC.json b/resources/boards/BW1098OBC.json index 8a54bc8ad..672df86c2 100644 --- a/resources/boards/BW1098OBC.json +++ b/resources/boards/BW1098OBC.json @@ -3,10 +3,53 @@ { "name": "OAK-D", "revision": "R1M0E1", - "swap_left_and_right_cameras": true, - "left_fov_deg": 71.86, - "rgb_fov_deg": 68.7938, - "left_to_right_distance_cm": 7.5, - "left_to_rgb_distance_cm": 3.75 + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } } } + diff --git a/resources/boards/DM1092.json b/resources/boards/DM1092.json new file mode 100644 index 000000000..ac029a371 --- /dev/null +++ b/resources/boards/DM1092.json @@ -0,0 +1,55 @@ +{ + "board_config": + { + "name": "OAK-D-IoT-40", + "revision": "R2M2E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -4, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } + } +} + diff --git a/resources/boards/DM1097.json b/resources/boards/DM1097.json new file mode 100644 index 000000000..1b302f5ea --- /dev/null +++ b/resources/boards/DM1097.json @@ -0,0 +1,54 @@ +{ + "board_config": + { + "name": "OAK-D-CM4", + "revision": "R2M0E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -9.0, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 7, + "y": 0, + "z": 0 + }, 
+ "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } + } +} diff --git a/resources/boards/DM1098OAKD_WIFI.json b/resources/boards/DM1098OAKD_WIFI.json new file mode 100644 index 000000000..7f7d7a1de --- /dev/null +++ b/resources/boards/DM1098OAKD_WIFI.json @@ -0,0 +1,55 @@ +{ + "board_config": + { + "name": "OAK-D-IoT-75", + "revision": "R0M0E0", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } + } +} + diff --git a/resources/boards/DM2097.json b/resources/boards/DM2097.json deleted file mode 100644 index 84a00b156..000000000 --- a/resources/boards/DM2097.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "board_config": - { - "name": "DM2097", - "revision": "R0M0E0", - "swap_left_and_right_cameras": false, - "left_fov_deg": 71.86, - "rgb_fov_deg": 68.7938, - "left_to_right_distance_cm": 9.0, - "left_to_rgb_distance_cm": 7.7 - } -} diff --git a/resources/boards/OAK-1-LITE-W.json b/resources/boards/OAK-1-LITE-W.json new file mode 100644 index 000000000..24cac5786 --- /dev/null +++ b/resources/boards/OAK-1-LITE-W.json @@ -0,0 +1,14 @@ +{ + "board_config": + { + "name": "OAK-1-Lite-W", + "revision": "R3M1E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 108.0, + "type": "color" + } + } + } +} \ No newline at end of file diff --git a/resources/boards/OAK-1-LITE.json b/resources/boards/OAK-1-LITE.json new file mode 100644 index 000000000..ab97c8889 --- /dev/null +++ b/resources/boards/OAK-1-LITE.json @@ -0,0 +1,14 @@ +{ + "board_config": + { + "name": "OAK-1-LITE", + "revision": "R1M0E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + } + } + } +} \ No newline at end of file diff --git a/resources/boards/OAK-1-MAX.json b/resources/boards/OAK-1-MAX.json new file mode 100644 index 000000000..14be62a8a --- /dev/null +++ b/resources/boards/OAK-1-MAX.json @@ -0,0 +1,14 @@ +{ + "board_config": + { + "name": "OAK-1-MAX", + "revision": "R3M1E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + } + } + } +} \ No newline at end of file diff --git a/resources/boards/OAK-1-POE.json b/resources/boards/OAK-1-POE.json new file mode 100644 index 000000000..c682b0c97 --- /dev/null +++ b/resources/boards/OAK-1-POE.json @@ -0,0 +1,16 @@ +{ + "board_config": + { + "name": "OAK-1-POE", + "revision": "R2M1E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + } + } + + } +} + diff --git a/resources/boards/OAK-1-W.json b/resources/boards/OAK-1-W.json new file mode 100644 index 000000000..6dab92b13 --- /dev/null +++ b/resources/boards/OAK-1-W.json @@ -0,0 +1,14 @@ +{ + "board_config": + { + "name": "OAK-1-W", + "revision": "R3M1E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 108.0, + "type": "color" + } + } + } +} \ No newline at end of file diff --git a/resources/boards/OAK-1.json b/resources/boards/OAK-1.json new file mode 100644 index 000000000..2bbf8de9f --- /dev/null +++ 
b/resources/boards/OAK-1.json @@ -0,0 +1,14 @@ +{ + "board_config": + { + "name": "OAK-1", + "revision": "R3M1E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + } + } + } +} \ No newline at end of file diff --git a/resources/boards/OAK-D-CM4-POE.json b/resources/boards/OAK-D-CM4-POE.json deleted file mode 100644 index b9c67c2e8..000000000 --- a/resources/boards/OAK-D-CM4-POE.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "board_config": - { - "name": "OAK-D-CM4-POE", - "revision": "R2M1E2", - "swap_left_and_right_cameras": false, - "left_fov_deg": 71.86, - "rgb_fov_deg": 68.7938, - "left_to_right_distance_cm": 9.0, - "left_to_rgb_distance_cm": 7.7 - } -} diff --git a/resources/boards/OAK-D-CM4.json b/resources/boards/OAK-D-CM4.json deleted file mode 100644 index 4d976da08..000000000 --- a/resources/boards/OAK-D-CM4.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "board_config": - { - "name": "OAK-D-CM4", - "revision": "R2M0E2", - "swap_left_and_right_cameras": true, - "left_fov_deg": 71.86, - "rgb_fov_deg": 68.7938, - "left_to_right_distance_cm": 9.0, - "left_to_rgb_distance_cm": 2.0 - } -} - diff --git a/resources/boards/OAK-D-IoT-40.json b/resources/boards/OAK-D-IoT-40.json deleted file mode 100644 index 23ba6912b..000000000 --- a/resources/boards/OAK-D-IoT-40.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "board_config": - { - "name": "OAK-D-IoT-40", - "revision": "R2M2E2", - "swap_left_and_right_cameras": true, - "left_fov_deg": 71.86, - "rgb_fov_deg": 68.7938, - "left_to_right_distance_cm": 4.0, - "left_to_rgb_distance_cm": 3.0 - } -} diff --git a/resources/boards/OAK-D-LITE.json b/resources/boards/OAK-D-LITE.json index 03376e6f6..483ca6c8d 100644 --- a/resources/boards/OAK-D-LITE.json +++ b/resources/boards/OAK-D-LITE.json @@ -2,11 +2,55 @@ "board_config": { "name": "OAK-D-LITE", - "revision": "R0M0E0", - "left_fov_deg": 72.9, - "rgb_fov_deg": 68.7938, - "left_to_right_distance_cm": 7.5, - "left_to_rgb_distance_cm": 3.75 + "revision": "R1M1E3", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 72.9, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 72.9, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } } } + diff --git a/resources/boards/OAK-D-POE-W.json b/resources/boards/OAK-D-POE-W.json new file mode 100644 index 000000000..a81caf948 --- /dev/null +++ b/resources/boards/OAK-D-POE-W.json @@ -0,0 +1,55 @@ +{ + "board_config": + { + "name": "OAK-D POE-W", + "revision": "R3M2E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 108.0, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } + } +} + diff --git a/resources/boards/OAK-D-POE.json 
b/resources/boards/OAK-D-POE.json new file mode 100644 index 000000000..b9d48c662 --- /dev/null +++ b/resources/boards/OAK-D-POE.json @@ -0,0 +1,55 @@ +{ + "board_config": + { + "name": "OAK-D-POE", + "revision": "R3M1E3", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } + } +} + diff --git a/resources/boards/OAK-D-PRO-POE-CUSTOM.json b/resources/boards/OAK-D-PRO-POE-CUSTOM.json new file mode 100644 index 000000000..502cab780 --- /dev/null +++ b/resources/boards/OAK-D-PRO-POE-CUSTOM.json @@ -0,0 +1,55 @@ +{ + "board_config": + { + "name": "OAK-D-PRO-POE-CUSTOM", + "revision": "R3M1E3", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 71.86, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } + } +} + diff --git a/resources/boards/OAK-D-PRO-POE.json b/resources/boards/OAK-D-PRO-POE.json new file mode 100644 index 000000000..f68487d30 --- /dev/null +++ b/resources/boards/OAK-D-PRO-POE.json @@ -0,0 +1,55 @@ +{ + "board_config": + { + "name": "OAK-D-PRO-POE", + "revision": "R3M1E3", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } + } +} + diff --git a/resources/boards/OAK-D-PRO-W-CUSTOM.json b/resources/boards/OAK-D-PRO-W-CUSTOM.json new file mode 100644 index 000000000..742417bcb --- /dev/null +++ b/resources/boards/OAK-D-PRO-W-CUSTOM.json @@ -0,0 +1,55 @@ +{ + "board_config": + { + "name": "OAK-D Pro-W", + "revision": "R3M2E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 127.0, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" 
+ } + } +} + diff --git a/resources/boards/OAK-D-PRO-W-POE-CUSTOM.json b/resources/boards/OAK-D-PRO-W-POE-CUSTOM.json new file mode 100644 index 000000000..7cb811ffa --- /dev/null +++ b/resources/boards/OAK-D-PRO-W-POE-CUSTOM.json @@ -0,0 +1,55 @@ +{ + "board_config": + { + "name": "OAK-D-PRO-W-POE", + "revision": "R3M1E3", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 127.0, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } + } +} + diff --git a/resources/boards/OAK-D-PRO-W-POE.json b/resources/boards/OAK-D-PRO-W-POE.json new file mode 100644 index 000000000..3f576b2c8 --- /dev/null +++ b/resources/boards/OAK-D-PRO-W-POE.json @@ -0,0 +1,55 @@ +{ + "board_config": + { + "name": "OAK-D Pro-W PoE", + "revision": "R3M2E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 108.0, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } + } +} + diff --git a/resources/boards/OAK-D-PRO-W.json b/resources/boards/OAK-D-PRO-W.json index 6bf207525..b50c56a3f 100644 --- a/resources/boards/OAK-D-PRO-W.json +++ b/resources/boards/OAK-D-PRO-W.json @@ -1,13 +1,55 @@ { "board_config": { - "name": "OAK-D-PRO-W", - "revision": "R3M1E3", - "swap_left_and_right_cameras": true, - "left_fov_deg": 127.0, - "rgb_fov_deg": 108.0, - "left_to_right_distance_cm": 7.5, - "left_to_rgb_distance_cm": 3.75 + "name": "OAK-D Pro-W", + "revision": "R3M2E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 108.0, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } } } diff --git a/resources/boards/OAK-D-PRO.json b/resources/boards/OAK-D-PRO.json index c73e744e9..8a4cfe6f8 100644 --- a/resources/boards/OAK-D-PRO.json +++ b/resources/boards/OAK-D-PRO.json @@ -3,11 +3,53 @@ { "name": "OAK-D-PRO", "revision": "R3M1E3", - "swap_left_and_right_cameras": true, - "left_fov_deg": 71.86, - "rgb_fov_deg": 68.7938, - "left_to_right_distance_cm": 7.5, - "left_to_rgb_distance_cm": 3.75 + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + 
"specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } } } diff --git a/resources/boards/OAK-D-S2.json b/resources/boards/OAK-D-S2.json new file mode 100644 index 000000000..9edbf341e --- /dev/null +++ b/resources/boards/OAK-D-S2.json @@ -0,0 +1,55 @@ +{ + "board_config": + { + "name": "OAK-D-S2", + "revision": "R2M0E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 68.7938, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 71.86, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } + } +} + diff --git a/resources/boards/OAK-D-SR.json b/resources/boards/OAK-D-SR.json new file mode 100644 index 000000000..348a3b246 --- /dev/null +++ b/resources/boards/OAK-D-SR.json @@ -0,0 +1,40 @@ +{ + "board_config": + { + "name": "OAK-D-SR", + "revision": "R0M0E0", + "cameras":{ + "LEFT": { + "name": "left", + "hfov": 71.86, + "type": "color", + "extrinsics": { + "to_cam": "RIGHT", + "specTranslation": { + "x": -2.0, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + + } + + }, + "RIGHT": { + "name": "right", + "hfov": 71.86, + "type": "color" + } + + }, + + "stereo_config":{ + "left_cam": "LEFT", + "right_cam": "RIGHT" + } + } +} diff --git a/resources/boards/OAK-D-W.json b/resources/boards/OAK-D-W.json index e70e613da..2d0e63a57 100644 --- a/resources/boards/OAK-D-W.json +++ b/resources/boards/OAK-D-W.json @@ -2,12 +2,54 @@ "board_config": { "name": "OAK-D-W", - "revision": "R3M1E3", - "swap_left_and_right_cameras": true, - "left_fov_deg": 127.0, - "rgb_fov_deg": 108.0, - "left_to_right_distance_cm": 7.5, - "left_to_rgb_distance_cm": 3.75 + "revision": "R3M2E2", + "cameras":{ + "CAM_A": { + "name": "rgb", + "hfov": 108.0, + "type": "color" + }, + "CAM_B": { + "name": "left", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_C", + "specTranslation": { + "x": -7.5, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + }, + "CAM_C": { + "name": "right", + "hfov": 127.0, + "type": "mono", + "extrinsics": { + "to_cam": "CAM_A", + "specTranslation": { + "x": 3.75, + "y": 0, + "z": 0 + }, + "rotation":{ + "r": 0, + "p": 0, + "y": 0 + } + } + } + }, + "stereo_config":{ + "left_cam": "CAM_B", + "right_cam": "CAM_C" + } } }