From 8215c6aeac17b3274a047ecf95398a07ae188bb7 Mon Sep 17 00:00:00 2001
From: Adam Tonderski
Date: Thu, 18 Apr 2024 09:30:00 +0200
Subject: [PATCH] Change to ruff-only with slightly increased line length (#51)

* Update linter settings
- only use ruff (remove black dependency)
- increase line length to 120

* Apply new linter

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: William Ljungbergh <37999571+wljungbergh@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml                      | 15 ++++------
 pyproject.toml                               | 16 +++++------
 zod/_zod_dataset.py                          |  1 +
 zod/anno/parser.py                           |  1 +
 zod/cli/download.py                          | 16 ++++-------
 zod/cli/extract_tsr_patches.py               | 30 ++++++--------------
 zod/cli/generate_coco_json.py                | 17 ++++-------
 zod/cli/verify.py                            |  8 ++----
 zod/cli/visualize_lidar.py                   | 16 +++--------
 zod/data_classes/box.py                      |  5 ++--
 zod/data_classes/calibration.py              |  5 ++--
 zod/data_classes/ego_motion.py               | 24 ++++------------
 zod/data_classes/frame.py                    | 12 ++------
 zod/data_classes/geometry.py                 |  4 +--
 zod/data_classes/info.py                     |  4 +--
 zod/data_classes/oxts.py                     |  1 +
 zod/data_classes/sensor.py                   |  8 ++----
 zod/data_classes/sequence.py                 |  9 ++----
 zod/data_classes/vehicle_data.py             |  8 ++----
 zod/eval/detection/_experimental/matching.py | 12 ++------
 zod/eval/detection/eval_nuscenes_style.py    | 25 ++++------------
 zod/utils/geometry.py                        |  1 +
 zod/visualization/bev_utils.py               | 15 ++++------
 zod/visualization/colorlabeler.py            |  5 +---
 zod/visualization/lidar_3d.py                |  1 +
 zod/visualization/lidar_bev.py               | 21 ++++----------
 zod/visualization/lidar_on_image.py          |  4 +--
 zod/visualization/object_visualization.py    | 17 +++--------
 zod/visualization/oxts_on_image.py           |  5 ++--
 zod/zod_frames.py                            |  1 +
 30 files changed, 95 insertions(+), 212 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 466b64d..1c21fae 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,14 +1,9 @@
 repos:
-  - repo: https://github.com/psf/black
-    rev: 23.12.1
-    hooks:
-      - id: black-jupyter
-        language_version: python3
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.1.9
+    rev: v0.3.7
     hooks:
+      # Run the linter.
       - id: ruff
-  - repo: https://github.com/abravalheri/validate-pyproject
-    rev: v0.15
-    hooks:
-      - id: validate-pyproject
+        args: [ --fix ]
+      # Run the formatter.
+      - id: ruff-format
diff --git a/pyproject.toml b/pyproject.toml
index d567515..f865807 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -49,8 +49,7 @@ all = [
 ]
 
 [tool.poetry.group.dev.dependencies]
-ruff = "^0.0.200"
-black = ">=20.0"
+ruff = "^0.3.7"
 pre-commit = ">=2"
 pytest = ">=7"
 
@@ -59,11 +58,12 @@ requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
 
 [tool.ruff]
-line-length = 100
-# isort, Warning, pycodestyle (see here: https://github.com/charliermarsh/ruff#supported-rules)
-select = ["I", "W", "E"]
+line-length = 120
 exclude = ["zod/eval/detection/_nuscenes_eval/*"]
 
-[tool.black]
-line-length = 100
-target-version = ["py38"]
+[tool.ruff.lint]
+select = [ # (see here: https://github.com/charliermarsh/ruff#supported-rules)
+    "I", # isort
+    "W", # warning
+    "E", # pycodestyle
+]
diff --git a/zod/_zod_dataset.py b/zod/_zod_dataset.py
index 7946f63..2d79b6f 100644
--- a/zod/_zod_dataset.py
+++ b/zod/_zod_dataset.py
@@ -1,4 +1,5 @@
 """ZOD Frames module."""
+
 import json
 import os.path as osp
 from abc import ABC, abstractmethod
diff --git a/zod/anno/parser.py b/zod/anno/parser.py
index 69d78c8..a8a42a4 100644
--- a/zod/anno/parser.py
+++ b/zod/anno/parser.py
@@ -1,4 +1,5 @@
 """Annotation parsers."""
+
 import json
 from dataclasses import dataclass
 from typing import Any, Dict, List
diff --git a/zod/cli/download.py b/zod/cli/download.py
index 56be1c9..3515284 100644
--- a/zod/cli/download.py
+++ b/zod/cli/download.py
@@ -1,4 +1,5 @@
 """This script is to be used to download the Zenseact Open Dataset."""
+
 import contextlib
 import os
 import os.path as osp
@@ -137,8 +138,7 @@ def _download(download_path: str, dbx: ResumableDropbox, info: DownloadExtractIn
         )
     if pbar.n > info.size:
         tqdm.write(
-            f"Error! File {download_path} already exists and is larger than expected. "
-            "Please delete and try again."
+            f"Error! File {download_path} already exists and is larger than expected. " "Please delete and try again."
         )
     if pbar.n > 0:
         # this means we are retrying or resuming a previously interrupted download
@@ -146,9 +146,7 @@ def _download(download_path: str, dbx: ResumableDropbox, info: DownloadExtractIn
     # Retry download if partial file exists (e.g. due to network error)
     while pbar.n < info.size:
         try:
-            _, response = dbx.sharing_get_shared_link_file(
-                url=info.url, path=info.file_path, start=pbar.n
-            )
+            _, response = dbx.sharing_get_shared_link_file(url=info.url, path=info.file_path, start=pbar.n)
             with open(download_path, "ab") as f:
                 with contextlib.closing(response):
                     for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
@@ -333,9 +331,7 @@ def _print_summary(download_settings, filter_settings, subset):
         print("    version: mini\n    (other settings are ignored for mini)")
     else:
         print(filter_settings)
-    if subset == SubDataset.FRAMES and (
-        filter_settings.num_scans_before == filter_settings.num_scans_after == 0
-    ):
+    if subset == SubDataset.FRAMES and (filter_settings.num_scans_before == filter_settings.num_scans_after == 0):
         typer.secho(
             "Note! The current settings will only download the core lidar frames. "
             "If you need surrounding scans, set --num-scans-before and/or --num-scans-after.",
@@ -389,9 +385,7 @@ def download(
         False, help="Extract already downloaded archives", rich_help_panel=GEN
     ),
     parallel: bool = typer.Option(True, help="Download files in parallel", rich_help_panel=GEN),
-    max_workers: int = typer.Option(
-        None, help="Max number of workers for parallel downloads", rich_help_panel=GEN
-    ),
+    max_workers: int = typer.Option(None, help="Max number of workers for parallel downloads", rich_help_panel=GEN),
     no_confirm: bool = typer.Option(
         False,
         "-y",
diff --git a/zod/cli/extract_tsr_patches.py b/zod/cli/extract_tsr_patches.py
index a8489c3..4b50dad 100644
--- a/zod/cli/extract_tsr_patches.py
+++ b/zod/cli/extract_tsr_patches.py
@@ -65,10 +65,8 @@ def extract_tsr_patches(
         resolve_path=True,
         help="Path to the output directory.",
     ),
     version: Version = typer.Option(..., help="Version of the dataset to use."),
-    padding_factor: Optional[float] = typer.Option(
-        None, help="Factor to multiply the padding with."
-    ),
+    padding_factor: Optional[float] = typer.Option(None, help="Factor to multiply the padding with."),
     padding_px_y: Optional[int] = typer.Option(None, help="Padding in y direction."),
     padding_px_x: Optional[int] = typer.Option(None, help="Padding in x direction."),
     num_workers: Optional[int] = typer.Option(None, help="Number of workers to use."),
@@ -83,11 +81,9 @@ def extract_tsr_patches(
     assert not (
         padding_factor is not None and (padding_px_x is not None or padding_px_y is not None)
     ), "Cannot specify both padding and padding_factor"
-    padding = (
-        (padding_px_x, padding_px_y)
-        if (padding_px_x is not None and padding_px_y is not None)
-        else None
-    )
+
+    padding = (padding_px_x, padding_px_y) if (padding_px_x is not None and padding_px_y is not None) else None
+
     settings = Settings(
         output_dir=output_dir,
         padding_factor=padding_factor,
@@ -114,9 +110,7 @@ def extract_tsr_patches(
     train_frame_ids = set(zod_frames.get_split(constants.TRAIN))
     val_frame_ids = set(zod_frames.get_split(constants.VAL))
 
-    train_frames = [
-        f for f in traffic_sign_frames if f["frame_id"].split("_")[0] in train_frame_ids
-    ]
+    train_frames = [f for f in traffic_sign_frames if f["frame_id"].split("_")[0] in train_frame_ids]
     val_frames = [f for f in traffic_sign_frames if f["frame_id"].split("_")[0] in val_frame_ids]
 
     # write it to a json file
@@ -132,18 +126,14 @@ def extract_tsr_patches(
     )
 
 
-def _process_frame(
-    frame: ZodFrame, settings: Settings, train_ids: Set[str]
-) -> List[Dict[str, Any]]:
+def _process_frame(frame: ZodFrame, settings: Settings, train_ids: Set[str]) -> List[Dict[str, Any]]:
     """Process a single frame."""
 
    # not all frames have traffic signs
     if constants.AnnotationProject.TRAFFIC_SIGNS not in frame.info.annotations:
         return []
 
-    traffic_signs: List[parser.TrafficSignAnnotation] = frame.get_annotation(
-        constants.AnnotationProject.TRAFFIC_SIGNS
-    )
+    traffic_signs: List[parser.TrafficSignAnnotation] = frame.get_annotation(constants.AnnotationProject.TRAFFIC_SIGNS)
     if len(traffic_signs) == 0:
         return []
 
@@ -196,9 +186,7 @@ def _process_frame(
                 "center_y": float(center_y),
                 "original_width": float(traffic_sign.bounding_box.dimension[0]),
                 "original_height": float(traffic_sign.bounding_box.dimension[1]),
-                "annotation": {
-                    key: val for key, val in traffic_sign.__dict__.items() if key != "bounding_box"
-                },
+                "annotation": {key: val for key, val in traffic_sign.__dict__.items() if key != "bounding_box"},
             }
         )
 
diff --git a/zod/cli/generate_coco_json.py b/zod/cli/generate_coco_json.py
index b75cacf..614ffad 100644
--- a/zod/cli/generate_coco_json.py
+++ b/zod/cli/generate_coco_json.py
@@ -18,9 +18,7 @@
 
 # Map classes to categories, starting from 1
 CATEGORY_NAME_TO_ID = {cls: i + 1 for i, cls in enumerate(OBJECT_CLASSES)}
-OPEN_DATASET_URL = (
-    "https://www.ai.se/en/data-factory/datasets/data-factory-datasets/zenseact-open-dataset"
-)
+OPEN_DATASET_URL = "https://www.ai.se/en/data-factory/datasets/data-factory-datasets/zenseact-open-dataset"
 
 
 def _convert_frame(
@@ -69,9 +67,7 @@ def generate_coco_json(
     """Generate COCO JSON file from the ZOD dataset."""
     assert split in ["train", "val"], f"Unknown split: {split}"
     frame_infos = [dataset[frame_id] for frame_id in dataset.get_split(split)]
-    _convert_frame_w_classes = partial(
-        _convert_frame, classes=classes, anonymization=anonymization, use_png=use_png
-    )
+    _convert_frame_w_classes = partial(_convert_frame, classes=classes, anonymization=anonymization, use_png=use_png)
     results = process_map(
         _convert_frame_w_classes,
         frame_infos,
@@ -127,10 +123,8 @@ def convert_to_coco(
         resolve_path=True,
         help="Path to the output directory.",
     ),
     version: Version = typer.Option(..., help="Version of the dataset to use."),
-    anonymization: Anonymization = typer.Option(
-        Anonymization.BLUR.value, help="Anonymization mode to use."
-    ),
+    anonymization: Anonymization = typer.Option(Anonymization.BLUR.value, help="Anonymization mode to use."),
     use_png: bool = typer.Option(False, help="Whether to use PNG images instead of JPG."),
     classes: List[str] = typer.Option(
         ["Vehicle", "Pedestrian", "VulnerableVehicle"], help="Classes to include in the dataset."
@@ -142,8 +136,7 @@ def convert_to_coco(
             typer.echo(f"ERROR: Invalid class: {cls}.")
             raise typer.Exit(1)
     typer.echo(
-        "Converting ZOD to COCO format. "
-        f"Version: {version}, anonymization: {anonymization}, classes: {classes}"
+        "Converting ZOD to COCO format. " f"Version: {version}, anonymization: {anonymization}, classes: {classes}"
     )
 
     zod_frames = ZodFrames(str(dataset_root), version.value)
diff --git a/zod/cli/verify.py b/zod/cli/verify.py
index 12e410a..c1244cf 100644
--- a/zod/cli/verify.py
+++ b/zod/cli/verify.py
@@ -13,9 +13,7 @@
 app = typer.Typer(help="ZOD Download Verifyer", no_args_is_help=True)
 
 
-def _verify_info(
-    info: Information, separate_lidar: bool
-) -> Dict[str, Dict[str, Union[bool, List[bool]]]]:
+def _verify_info(info: Information, separate_lidar: bool) -> Dict[str, Dict[str, Union[bool, List[bool]]]]:
     """Verify the given infos."""
     stats = defaultdict(dict)
     stats["general"] = {
@@ -57,9 +55,7 @@ def _print_results(verified_infos):
     groups = sorted(set().union(*verified_infos))
     for group in groups:
         keys = sorted(set().union(*(v[group] for v in verified_infos)))
-        stats = {
-            k: [d[group][k] for d in verified_infos if group in d and k in d[group]] for k in keys
-        }
+        stats = {k: [d[group][k] for d in verified_infos if group in d and k in d[group]] for k in keys}
         print(f"\n\n{group.upper():^45}\n{'-' * 55}")
         print(f"{'Data':<20} {'Downloaded (%)':<15} {'Expected (count)':<20}")
         print("-" * 55)
diff --git a/zod/cli/visualize_lidar.py b/zod/cli/visualize_lidar.py
index d26e7d6..480df1f 100644
--- a/zod/cli/visualize_lidar.py
+++ b/zod/cli/visualize_lidar.py
@@ -40,18 +40,14 @@ def _visualize(data: LidarData, boxes: List[Box3D] = None):
     if boxes:
         o3d_boxes = []
         for box in boxes:
-            o3d_box = o3d.geometry.OrientedBoundingBox.create_from_points(
-                o3d.utility.Vector3dVector(box.corners)
-            )
+            o3d_box = o3d.geometry.OrientedBoundingBox.create_from_points(o3d.utility.Vector3dVector(box.corners))
            o3d_box.color = (0.98, 0.63, 0.01)
             o3d_boxes.append(o3d_box)
         o3d.visualization.draw_geometries(
             [pcd, *o3d_boxes, o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)]
         )
     else:
-        o3d.visualization.draw_geometries(
-            [pcd, o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)]
-        )
+        o3d.visualization.draw_geometries([pcd, o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)])
 
 
 @app.command(no_args_is_help=True)
@@ -61,9 +57,7 @@ def frames(
     frame_id: int = typer.Option(..., help="Frame id to visualize"),
     num_before: int = typer.Option(0, help="Number of frames before the given frame id"),
     num_after: int = typer.Option(0, help="Number of frames after the given frame id"),
-    with_bounding_boxes: bool = typer.Option(
-        False, help="if bounding boxes of center-frame are to be rendered"
-    ),
+    with_bounding_boxes: bool = typer.Option(False, help="if bounding boxes of center-frame are to be rendered"),
 ):
     """Visualize the lidar data for a given frame id."""
     zod_frames = ZodFrames(dataset_root=dataset_root, version=version)
@@ -96,9 +90,7 @@ def sequences(
     data = frame.get_aggregated_lidar(start=start, end=end)
     if downsampling > 1:
         typer.echo(f"Will subsample the point-cloud with a factor {downsampling}")
-        indexes = np.random.choice(
-            data.points.shape[0], size=data.points.shape[0] // downsampling, replace=False
-        )
+        indexes = np.random.choice(data.points.shape[0], size=data.points.shape[0] // downsampling, replace=False)
         data.points = data.points[indexes]
         data.intensity = data.intensity[indexes]
         data.timestamps = data.timestamps[indexes]
diff --git a/zod/data_classes/box.py b/zod/data_classes/box.py
index 6373a4d..ee98eb1 100644
--- a/zod/data_classes/box.py
+++ b/zod/data_classes/box.py
@@ -1,4 +1,5 @@
 """ZOD Object detection containers."""
+
 from __future__ import annotations
 
 from dataclasses import dataclass
@@ -315,9 +316,7 @@ def get_3d_frustum(
     depth = np.array([max_depth] * 4)
 
     # Project the 2d corners to the max_depth using the calibration
-    frustum = unproject_2d_to_3d_kannala(
-        corners, camera_calib.intrinsics, camera_calib.undistortion, depth
-    )
+    frustum = unproject_2d_to_3d_kannala(corners, camera_calib.intrinsics, camera_calib.undistortion, depth)
 
     if min_depth == 0.0:
         frustum = np.concatenate((np.zeros((4, 3)), frustum), axis=0)
diff --git a/zod/data_classes/calibration.py b/zod/data_classes/calibration.py
index b734fdc..e2b3b8d 100644
--- a/zod/data_classes/calibration.py
+++ b/zod/data_classes/calibration.py
@@ -1,4 +1,5 @@
 """Calibration dataclasses."""
+
 from __future__ import annotations
 
 import json
@@ -35,9 +36,7 @@ class Calibration:
     @classmethod
     def from_dict(cls, calib_dict: Dict[str, Any]) -> Calibration:
         lidars = {
-            Lidar.VELODYNE: LidarCalibration(
-                extrinsics=Pose(np.array(calib_dict["FC"]["lidar_extrinsics"]))
-            ),
+            Lidar.VELODYNE: LidarCalibration(extrinsics=Pose(np.array(calib_dict["FC"]["lidar_extrinsics"]))),
         }
         cameras = {
             Camera.FRONT: CameraCalibration(
diff --git a/zod/data_classes/ego_motion.py b/zod/data_classes/ego_motion.py
index 6865165..8419de6 100644
--- a/zod/data_classes/ego_motion.py
+++ b/zod/data_classes/ego_motion.py
@@ -40,9 +40,7 @@ def get_poses(self, target_ts: Union[np.ndarray, float]) -> np.ndarray:
         if np.isin(target_ts, self.timestamps).all():
             return self.poses[self.timestamps.searchsorted(target_ts)]
 
-        closest_idxs = self.timestamps.searchsorted(
-            target_ts, side="right", sorter=self.timestamps.argsort()
-        )
+        closest_idxs = self.timestamps.searchsorted(target_ts, side="right", sorter=self.timestamps.argsort())
 
         # if the target timestamp is exactly the same as the largest timestamp
         # then the searchsorted will return the length of the array, which is
@@ -54,9 +52,7 @@ def get_poses(self, target_ts: Union[np.ndarray, float]) -> np.ndarray:
         time_diffs = target_ts - self.timestamps[closest_idxs - 1]
         total_times = self.timestamps[closest_idxs] - self.timestamps[closest_idxs - 1]
         fractions = time_diffs / total_times
-        return interpolate_transforms(
-            self.poses[closest_idxs - 1], self.poses[closest_idxs], fractions
-        )
+        return interpolate_transforms(self.poses[closest_idxs - 1], self.poses[closest_idxs], fractions)
 
     def interpolate(self, timestamps: np.ndarray) -> EgoMotion:
         """Interpolate ego motion to find ego motion for each target timestamp.
@@ -86,15 +82,9 @@ def from_oxts_path(cls, file_path: str) -> EgoMotion:
         with h5py.File(file_path, "r") as file:
             return cls(
                 poses=file["poses"][()],
-                accelerations=np.stack(
-                    [file["accelerationX"], file["accelerationY"], file["accelerationZ"]], axis=1
-                ),
-                velocities=np.stack(
-                    [file["velForward"][()], file["velLateral"][()], -file["velDown"][()]], axis=1
-                ),
-                angular_rates=np.stack(
-                    [file["angularRateX"], file["angularRateY"], file["angularRateZ"]], axis=1
-                ),
+                accelerations=np.stack([file["accelerationX"], file["accelerationY"], file["accelerationZ"]], axis=1),
+                velocities=np.stack([file["velForward"][()], file["velLateral"][()], -file["velDown"][()]], axis=1),
+                angular_rates=np.stack([file["angularRateX"], file["angularRateY"], file["angularRateZ"]], axis=1),
                 timestamps=OXTS_TIMESTAMP_OFFSET + file["timestamp"][()] + file["leapSeconds"][()],
                 origin_lat_lon=(file["posLat"][0], file["posLon"][0]),
             )
@@ -157,9 +147,7 @@ def interpolate_transforms(transform_1: np.ndarray, transform_2: np.ndarray, fra
     return transform
 
 
-def interpolate_vectors(
-    values: np.ndarray, source_timestamps: np.ndarray, target_timestamps: np.ndarray
-) -> np.ndarray:
+def interpolate_vectors(values: np.ndarray, source_timestamps: np.ndarray, target_timestamps: np.ndarray) -> np.ndarray:
     """Interpolate vectors to find vector for each (sorted) target timestamp.
 
     Args:
diff --git a/zod/data_classes/frame.py b/zod/data_classes/frame.py
index 576a02b..a3e387f 100644
--- a/zod/data_classes/frame.py
+++ b/zod/data_classes/frame.py
@@ -67,9 +67,7 @@ def get_image(
         anonymization: Anonymization = Anonymization.BLUR,
     ) -> np.ndarray:
         """Get the image."""
-        return self.info.get_key_camera_frame(
-            camera=Camera.FRONT, anonymization=anonymization
-        ).read()
+        return self.info.get_key_camera_frame(camera=Camera.FRONT, anonymization=anonymization).read()
 
     def get_lidar_frames(
         self,
@@ -92,9 +90,7 @@ def compensate_lidar(self, data: LidarData, timestamp: float) -> LidarData:
         lidar_calib = self.calibration.lidars[Lidar.VELODYNE]
         return motion_compensate_scanwise(data, self.ego_motion, lidar_calib, timestamp)
 
-    def get_aggregated_lidar(
-        self, num_before: int, num_after: int = 0, timestamp: Optional[float] = None
-    ) -> LidarData:
+    def get_aggregated_lidar(self, num_before: int, num_after: int = 0, timestamp: Optional[float] = None) -> LidarData:
         """Get an aggregated point cloud around the keyframe."""
         key_lidar_frame = self.info.get_key_lidar_frame()
         key_lidar_data = key_lidar_frame.read()
@@ -109,9 +105,7 @@ def get_aggregated_lidar(
                 continue
             lidar_data = lidar_frame.read()
             _adjust_lidar_core_time(lidar_data)
-            lidar_data = motion_compensate_scanwise(
-                lidar_data, self.ego_motion, lidar_calib, timestamp
-            )
+            lidar_data = motion_compensate_scanwise(lidar_data, self.ego_motion, lidar_calib, timestamp)
             to_aggregate.append(lidar_data)
         # Aggregate the scans
         key_lidar_data.extend(*to_aggregate)
diff --git a/zod/data_classes/geometry.py b/zod/data_classes/geometry.py
index 6c49551..b95188c 100644
--- a/zod/data_classes/geometry.py
+++ b/zod/data_classes/geometry.py
@@ -33,9 +33,7 @@ def inverse(self) -> Pose:
         return Pose(np.linalg.inv(self.transform))
 
     @classmethod
-    def from_translation_rotation(
-        cls, translation: np.ndarray, rotation_matrix: np.ndarray
-    ) -> Pose:
+    def from_translation_rotation(cls, translation: np.ndarray, rotation_matrix: np.ndarray) -> Pose:
         """Create a pose from a translation and a rotation."""
         transform = np.eye(4, 4)
         transform[:3, :3] = rotation_matrix
diff --git a/zod/data_classes/info.py b/zod/data_classes/info.py
index a96aa03..d5ef553 100644
--- a/zod/data_classes/info.py
+++ b/zod/data_classes/info.py
@@ -69,9 +69,7 @@ def get_camera_lidar_map(
         assert (
             camera_name in self.camera_frames
         ), f"Camera {camera_name} not found. Available cameras: {self.camera_frames.keys()}"
-        assert (
-            lidar in self.lidar_frames
-        ), f"Lidar {lidar} not found. Available lidars: {self.lidar_frames.keys()}"
+        assert lidar in self.lidar_frames, f"Lidar {lidar} not found. Available lidars: {self.lidar_frames.keys()}"
 
         for camera_frame in self.camera_frames[camera_name]:
             # Get the closest lidar frame in time
diff --git a/zod/data_classes/oxts.py b/zod/data_classes/oxts.py
index 43adc4c..64cc19c 100644
--- a/zod/data_classes/oxts.py
+++ b/zod/data_classes/oxts.py
@@ -1,4 +1,5 @@
 """Backwards compatibility for oxts.py."""
+
 import warnings
 
 from .ego_motion import *
diff --git a/zod/data_classes/sensor.py b/zod/data_classes/sensor.py
index dbe9083..562b12f 100644
--- a/zod/data_classes/sensor.py
+++ b/zod/data_classes/sensor.py
@@ -1,4 +1,5 @@
 """ZOD dataclasses."""
+
 from __future__ import annotations
 
 from dataclasses import dataclass
@@ -103,9 +104,7 @@ def transform(self, pose: Union[np.ndarray, Pose]) -> None:
             pose = pose.transform
         rotations = pose[..., :3, :3]
         translations = pose[..., :3, 3]
-        self.points = (
-            self.points[..., None, :] @ rotations.swapaxes(-2, -1) + translations[..., None, :]
-        ).squeeze(-2)
+        self.points = (self.points[..., None, :] @ rotations.swapaxes(-2, -1) + translations[..., None, :]).squeeze(-2)
 
     def extend(self, *other: LidarData):
         """Extend this LidarData with data from another LidarData object.
@@ -119,8 +118,7 @@ def extend(self, *other: LidarData):
         self.diode_idx = np.hstack((self.diode_idx, *(o.diode_idx for o in other)))
         # Core timestamp is the weighted average
         self.core_timestamp = (
-            self.core_timestamp * len(self.timestamps)
-            + sum(o.core_timestamp * len(o.timestamps) for o in other)
+            self.core_timestamp * len(self.timestamps) + sum(o.core_timestamp * len(o.timestamps) for o in other)
         ) / (len(self.timestamps) + sum(len(o.timestamps) for o in other))
 
     def __eq__(self, other: LidarData) -> Union[bool, np.bool_]:
diff --git a/zod/data_classes/sequence.py b/zod/data_classes/sequence.py
index 21ef5b9..6972499 100644
--- a/zod/data_classes/sequence.py
+++ b/zod/data_classes/sequence.py
@@ -63,10 +63,7 @@ def get_annotation(self, project: AnnotationProject) -> List[Any]:
 
     def get_lidar(self, start: int = 0, end: Optional[int] = None) -> List[LidarData]:
         """Get the point clouds."""
-        return [
-            lidar_frame.read()
-            for lidar_frame in self.info.get_lidar_frames(Lidar.VELODYNE)[start:end]
-        ]
+        return [lidar_frame.read() for lidar_frame in self.info.get_lidar_frames(Lidar.VELODYNE)[start:end]]
 
     def get_compensated_lidar(self, time: datetime) -> LidarData:
         """Get the point cloud at a given timestamp."""
@@ -79,9 +76,7 @@ def get_compensated_lidar(self, time: datetime) -> LidarData:
             time.timestamp(),
         )
 
-    def get_aggregated_lidar(
-        self, start: int = 0, end: Optional[int] = None, timestamp: Optional[float] = None
-    ) -> LidarData:
+    def get_aggregated_lidar(self, start: int = 0, end: Optional[int] = None, timestamp: Optional[float] = None) -> LidarData:
         """Get the aggregated point cloud."""
         lidar_scans = self.get_lidar(start, end)
         if timestamp is None:
diff --git a/zod/data_classes/vehicle_data.py b/zod/data_classes/vehicle_data.py
index c19d4b5..0ce0c74 100644
--- a/zod/data_classes/vehicle_data.py
+++ b/zod/data_classes/vehicle_data.py
@@ -55,13 +55,9 @@ def from_hdf5(cls, path: str) -> "EgoVehicleControls":
             evc = h5_file["ego_vehicle_controls"]
             return cls(
                 acc_pedal=evc["acceleration_pedal/ratio/unitless/value"][:],
-                brake_pedal_pressed=evc[
-                    "brake_pedal_pressed/is_brake_pedal_pressed/unitless/value"
-                ][:],
+                brake_pedal_pressed=evc["brake_pedal_pressed/is_brake_pedal_pressed/unitless/value"][:],
                 steering_angle=evc["steering_wheel_angle/angle/radians/value"][:],
-                steering_angle_rate=evc["steering_wheel_angle/angle_rate/radians_per_second/value"][
-                    :
-                ],
+                steering_angle_rate=evc["steering_wheel_angle/angle_rate/radians_per_second/value"][:],
                 steering_wheel_torque=evc["steer_wheel_torque/torque/newton_meters/value"][:],
                 turn_indicator=evc["turn_indicator_status/state"][:],
                 timestamp=evc["timestamp/nanoseconds/value"][:],
diff --git a/zod/eval/detection/_experimental/matching.py b/zod/eval/detection/_experimental/matching.py
index 0484642..2e2346b 100644
--- a/zod/eval/detection/_experimental/matching.py
+++ b/zod/eval/detection/_experimental/matching.py
@@ -182,14 +182,10 @@ def greedy_match(
             matches.append((valid_gt.pop(best_match_idx), pred))
 
     if len(unmatched_predictions) > 0:
-        dont_care_matches = match_dont_care_objects(
-            dont_care_gt, unmatched_predictions, calibration
-        )
+        dont_care_matches = match_dont_care_objects(dont_care_gt, unmatched_predictions, calibration)
 
         # Remove the matched false positives from the unmatched predictions.
-        unmatched_predictions = [
-            pred for pred in unmatched_predictions if (pred not in dont_care_matches)
-        ]
+        unmatched_predictions = [pred for pred in unmatched_predictions if (pred not in dont_care_matches)]
 
     # return the matches, the unmatched predictions and the remaining valid ground truth objects
     return MatchedFrame(
@@ -256,9 +252,7 @@ def optimal_match(
     match_to_dont_care = match_dont_care_objects(dont_care_gt, unmatched_predictions, calibration)
 
     # Remove the matched false positives from the unmatched predictions.
-    unmatched_predictions = [
-        pred for pred in unmatched_predictions if pred not in match_to_dont_care
-    ]
+    unmatched_predictions = [pred for pred in unmatched_predictions if pred not in match_to_dont_care]
 
     # return the matches, the unmatched predictions and the remaining valid ground truth objects
     return MatchedFrame(
diff --git a/zod/eval/detection/eval_nuscenes_style.py b/zod/eval/detection/eval_nuscenes_style.py
index 64d8c9f..bb1c853 100644
--- a/zod/eval/detection/eval_nuscenes_style.py
+++ b/zod/eval/detection/eval_nuscenes_style.py
@@ -88,10 +88,7 @@ def evaluate_nuscenes_style(
     gt_boxes = _filter_eval_boxes_on_ranges(gt_boxes, class_ranges)
     det_boxes = _filter_eval_boxes_on_ranges(det_boxes, class_ranges)
 
-    metrics = {
-        dist_th: _nuscenes_evaluate(gt_boxes, det_boxes, dist_th=dist_th)
-        for dist_th in detection_cfg.dist_ths
-    }
+    metrics = {dist_th: _nuscenes_evaluate(gt_boxes, det_boxes, dist_th=dist_th) for dist_th in detection_cfg.dist_ths}
 
     evaluated_clses = set(metrics[detection_cfg.dist_ths[0]].keys())
     for zod_cls in evaluated_clses:
@@ -104,9 +101,7 @@ def evaluate_nuscenes_style(
         )
         # They evaluate the tp across only one threshold
         for metric in VALID_TP_METRICS:
-            detection_metrics.add_label_tp(
-                zod_cls, metric, metrics[detection_cfg.dist_th_tp][zod_cls][metric]
-            )
+            detection_metrics.add_label_tp(zod_cls, metric, metrics[detection_cfg.dist_th_tp][zod_cls][metric])
 
     if verbose:
         _print_nuscenes_metrics(detection_metrics)
@@ -169,25 +164,19 @@ def _nuscenes_evaluate(
             dist_th=dist_th,
         )
-        metrics[cls] = {
-            metric: calc_tp(md, min_recall=0.1, metric_name=metric) for metric in VALID_TP_METRICS
-        }
+        metrics[cls] = {metric: calc_tp(md, min_recall=0.1, metric_name=metric) for metric in VALID_TP_METRICS}
         metrics[cls]["ap"] = calc_ap(md, min_recall=min_recall, min_precision=min_precision)
 
     return metrics
 
 
-def _filter_eval_boxes_on_ranges(
-    boxes: EvalBoxes, class_ranges: Dict[str, int], verbose: bool = False
-) -> EvalBoxes:
+def _filter_eval_boxes_on_ranges(boxes: EvalBoxes, class_ranges: Dict[str, int], verbose: bool = False) -> EvalBoxes:
     """Filter out boxes that are outside of the range of the classes."""
     filtered_boxes = EvalBoxes()
 
     def _filter(box: DetectionBox):
         return (
             box.detection_name in class_ranges
-            and class_ranges[box.detection_name][0]
-            < box.ego_dist
-            <= class_ranges[box.detection_name][1]
+            and class_ranges[box.detection_name][0] < box.ego_dist <= class_ranges[box.detection_name][1]
         )
 
     for frame_id in boxes.sample_tokens:
@@ -231,9 +220,7 @@ def _print_nuscenes_metrics(metrics: DetectionMetrics):
 def _serialize(detection_metrics: DetectionMetrics) -> Dict[str, float]:
     # Only serialize the classes that were evaluated (had GT)
     classes = list(detection_metrics.mean_dist_aps.keys())
-    tp_metrics = {
-        name: detection_metrics.tp_errors[metric] for metric, name in VALID_TP_METRICS.items()
-    }
+    tp_metrics = {name: detection_metrics.tp_errors[metric] for metric, name in VALID_TP_METRICS.items()}
     class_aps = {f"{cls}/mAP": detection_metrics.mean_dist_aps[cls] for cls in classes}
     class_tps = {
         f"{cls}/{name}": detection_metrics._label_tp_errors[cls][metric]
diff --git a/zod/utils/geometry.py b/zod/utils/geometry.py
index b0cdff9..72d6726 100644
--- a/zod/utils/geometry.py
+++ b/zod/utils/geometry.py
@@ -1,4 +1,5 @@
 """Geometry utility functions."""
+
 from typing import Tuple, Union
 
 import numpy as np
diff --git a/zod/visualization/bev_utils.py b/zod/visualization/bev_utils.py
index 76e3617..787a8c0 100644
--- a/zod/visualization/bev_utils.py
+++ b/zod/visualization/bev_utils.py
@@ -1,4 +1,5 @@
 """Utilities for creating point cloud input representation."""
+
 from dataclasses import dataclass
 from typing import List, Tuple
 
@@ -93,18 +94,16 @@ def _create_pointcloud_input_pixor(
         A PIXOR style BEV projection of the input point cloud.
 
     """
-    point_indices_c = np.cast["int32"](
-        (points[:, 2] - settings.pixor_z_min) / settings.grid_cell_size
-    )
+    point_indices_c = np.cast["int32"]((points[:, 2] - settings.pixor_z_min) / settings.grid_cell_size)
     point_indices_c = 1 + np.clip(
         point_indices_c,
         a_min=-1,
         a_max=settings.grid_channels - 3,
     )
     point_indices_cxy = tuple(
-        np.transpose(
-            np.concatenate([np.expand_dims(point_indices_c, axis=-1), point_indices_xy], axis=-1)
-        ).reshape(3, -1)
+        np.transpose(np.concatenate([np.expand_dims(point_indices_c, axis=-1), point_indices_xy], axis=-1)).reshape(
+            3, -1
+        )
     )
 
     n_points = points.shape[0]
@@ -112,9 +111,7 @@ def _create_pointcloud_input_pixor(
     point_indices_intensity_c = np.repeat(settings.grid_channels - 1, n_points)
     point_indices_intensity_cxy = tuple(
         np.transpose(
-            np.concatenate(
-                [np.expand_dims(point_indices_intensity_c, axis=-1), point_indices_xy], axis=-1
-            )
+            np.concatenate([np.expand_dims(point_indices_intensity_c, axis=-1), point_indices_xy], axis=-1)
         ).reshape(3, -1)
     )
 
diff --git a/zod/visualization/colorlabeler.py b/zod/visualization/colorlabeler.py
index 1424e8f..a5ab301 100644
--- a/zod/visualization/colorlabeler.py
+++ b/zod/visualization/colorlabeler.py
@@ -74,10 +74,7 @@ def __init__(
 
     def label_to_color(self, label):
         """Convert label to color."""
-        return tuple(
-            int(val)
-            for val in self.color_map_image_[int((label * 255) // self.mapsize) % 255, 0, :]
-        )
+        return tuple(int(val) for val in self.color_map_image_[int((label * 255) // self.mapsize) % 255, 0, :])
 
     def label_to_color_norm(self, value):
         """Convert label to cv2 color."""
diff --git a/zod/visualization/lidar_3d.py b/zod/visualization/lidar_3d.py
index ac75be3..f668d37 100644
--- a/zod/visualization/lidar_3d.py
+++ b/zod/visualization/lidar_3d.py
@@ -1,4 +1,5 @@
 """3D visualization tool."""
+
 from typing import Any, List, Tuple
 
 import dash
diff --git a/zod/visualization/lidar_bev.py b/zod/visualization/lidar_bev.py
index 264845c..cd31389 100644
--- a/zod/visualization/lidar_bev.py
+++ b/zod/visualization/lidar_bev.py
@@ -1,4 +1,5 @@
 """Util to plot BEV."""
+
 from typing import Tuple
 
 import matplotlib.pyplot as plt
@@ -19,9 +20,7 @@ def __init__(self):
         # Exclude dark gray color
         self._color = px.colors.qualitative.Dark24[:5] + px.colors.qualitative.Dark24[6:]
 
-    def __call__(
-        self, bev: np.ndarray, objects: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
-    ) -> go.Figure:
+    def __call__(self, bev: np.ndarray, objects: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]) -> go.Figure:
         """Go through all objects for each batch, and write them on top the BEV."""
         input_ = create_pointcloud_input(bev, self._settings)
         classes, positions, dimensions, rotations = objects
@@ -78,21 +77,15 @@ def _setup_figure(self) -> go.Figure:
         fig = go.Figure(layout=layout)
         return fig
 
-    def _calculate_ticks(
-        self, limits: np.ndarray
-    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+    def _calculate_ticks(self, limits: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
         _, ax = plt.subplots(1, 1)
         ax.set_aspect("equal")
         ax.set_xlim(0, int(limits[0]))
         ax.set_ylim(0, int(limits[1]))
         x_ticks = ax.get_xticks()
         y_ticks = ax.get_yticks()
-        x_ticks_text = [
-            tick * self._settings.grid_cell_size + self._settings.grid_min[0] for tick in x_ticks
-        ]
-        y_ticks_text = [
-            tick * self._settings.grid_cell_size + self._settings.grid_min[1] for tick in y_ticks
-        ]
+        x_ticks_text = [tick * self._settings.grid_cell_size + self._settings.grid_min[0] for tick in x_ticks]
+        y_ticks_text = [tick * self._settings.grid_cell_size + self._settings.grid_min[1] for tick in y_ticks]
         plt.close()
 
         return x_ticks, x_ticks_text, y_ticks, y_ticks_text
@@ -163,8 +156,6 @@ def _activate_legend(self, fig):
     @staticmethod
     def _create_od_vis_background(input_array: np.ndarray) -> np.ndarray:
         """Create a gray occupancy grid as background to visualize over."""
-        occupancy = np.maximum.reduce(
-            np.cast["float32"](np.abs(input_array) > 0.0), axis=0, keepdims=True
-        )
+        occupancy = np.maximum.reduce(np.cast["float32"](np.abs(input_array) > 0.0), axis=0, keepdims=True)
         vis_bg = np.transpose(np.repeat(occupancy * 77, 3, axis=0), [2, 1, 0])
         return vis_bg
diff --git a/zod/visualization/lidar_on_image.py b/zod/visualization/lidar_on_image.py
index 3da1951..e60064a 100644
--- a/zod/visualization/lidar_on_image.py
+++ b/zod/visualization/lidar_on_image.py
@@ -55,9 +55,7 @@ def project_lidar_to_image(
     return xyd_array, final_mask
 
 
-def draw_projections_as_points(
-    image: np.ndarray, points: np.ndarray, clip_to: float = None
-) -> np.ndarray:
+def draw_projections_as_points(image: np.ndarray, points: np.ndarray, clip_to: float = None) -> np.ndarray:
     """Draw projected points from pointcloud to image plane as colored points.
 
     Args:
diff --git a/zod/visualization/object_visualization.py b/zod/visualization/object_visualization.py
index 4f8188b..bba6d78 100644
--- a/zod/visualization/object_visualization.py
+++ b/zod/visualization/object_visualization.py
@@ -36,20 +36,13 @@ def calc_iou(box1_corners, box2_corners):
     inner_bottom_coord = min(box1_corners[1][1], box2_corners[1][1])
     # compute the area of intersection rectangle
-    inter_area = abs(
-        max((inner_right_coord - inner_left_coord, 0))
-        * max((inner_bottom_coord - inner_top_coord), 0)
-    )
+    inter_area = abs(max((inner_right_coord - inner_left_coord, 0)) * max((inner_bottom_coord - inner_top_coord), 0))
     if inter_area == 0:
         return 0
 
     # compute the area of both the prediction and ground-truth
     # rectangles
-    box1_area = abs(
-        (box1_corners[0][0] - box1_corners[1][0]) * (box1_corners[0][1] - box1_corners[1][1])
-    )
-    box2_area = abs(
-        (box2_corners[0][0] - box2_corners[1][0]) * (box2_corners[0][1] - box2_corners[1][1])
-    )
+    box1_area = abs((box1_corners[0][0] - box1_corners[1][0]) * (box1_corners[0][1] - box1_corners[1][1]))
+    box2_area = abs((box2_corners[0][0] - box2_corners[1][0]) * (box2_corners[0][1] - box2_corners[1][1]))
 
     # compute the intersection over union by taking the intersection
     # area and dividing it by the sum of prediction + ground-truth
@@ -58,9 +51,7 @@ def calc_iou(box1_corners, box2_corners):
     return iou
 
 
-def overlay_object_2d_box_on_image(
-    image, box2d: Box2D, color=(0, 0, 100), scale_factor=None, line_thickness=2
-):
+def overlay_object_2d_box_on_image(image, box2d: Box2D, color=(0, 0, 100), scale_factor=None, line_thickness=2):
     """Visualize 2D box of annotated object on the image."""
     left_up = apply_scale(box2d.corners[0].astype(int), scale_factor)
     right_bottom = apply_scale(box2d.corners[2].astype(int), scale_factor)
diff --git a/zod/visualization/oxts_on_image.py b/zod/visualization/oxts_on_image.py
index df7e900..1673231 100644
--- a/zod/visualization/oxts_on_image.py
+++ b/zod/visualization/oxts_on_image.py
@@ -1,4 +1,5 @@
 """Module to perform OxTS extraction and visualize GPS track projection on image plane."""
+
 import cv2
 import numpy as np
 
@@ -8,9 +9,7 @@
 from zod.utils.geometry import get_points_in_camera_fov, project_3d_to_2d_kannala, transform_points
 
 
-def visualize_oxts_on_image(
-    oxts: EgoMotion, key_timestamp, calibs: Calibration, image, camera=Camera.FRONT
-):
+def visualize_oxts_on_image(oxts: EgoMotion, key_timestamp, calibs: Calibration, image, camera=Camera.FRONT):
     """Visualize oxts track on image plane."""
 
     # get pose at key frame
diff --git a/zod/zod_frames.py b/zod/zod_frames.py
index dc2ae99..c7d1bf4 100644
--- a/zod/zod_frames.py
+++ b/zod/zod_frames.py
@@ -1,4 +1,5 @@
 """ZOD Frames module."""
+
 from collections import defaultdict
 from typing import Dict, Union
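
---
Note (not part of the patch): with black and validate-pyproject dropped, linting and formatting are both handled by ruff through pre-commit. A minimal local-usage sketch, assuming pre-commit >= 2 and ruff >= 0.3.7 are installed; the commands mirror the hook configuration above rather than any repository docs:

    pre-commit install              # register the hooks from .pre-commit-config.yaml
    pre-commit run --all-files      # run ruff (lint, with --fix) and ruff-format on the whole tree

    # or invoke ruff directly, equivalent to the two hooks:
    ruff check --fix .
    ruff format .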