Commit
Change to ruff-only with slightly increased line length (#51)
* Update linter settings

- only use ruff (remove the black dependency)
- increase line length to 120

* Apply new linter

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: William Ljungbergh <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
3 people authored Apr 18, 2024
1 parent 53078a1 commit 8215c6a
Showing 30 changed files with 95 additions and 212 deletions.
15 changes: 5 additions & 10 deletions .pre-commit-config.yaml
@@ -1,14 +1,9 @@
 repos:
-  - repo: https://github.com/psf/black
-    rev: 23.12.1
-    hooks:
-      - id: black-jupyter
-        language_version: python3
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.1.9
+    rev: v0.3.7
     hooks:
+      # Run the linter.
       - id: ruff
+      # Run the formatter.
+      - id: ruff-format
   - repo: https://github.com/abravalheri/validate-pyproject
     rev: v0.15
     hooks:
       - id: validate-pyproject
         args: [ --fix ]
16 changes: 8 additions & 8 deletions pyproject.toml
@@ -49,8 +49,7 @@ all = [
 ]
 
 [tool.poetry.group.dev.dependencies]
-ruff = "^0.0.200"
-black = ">=20.0"
+ruff = "^0.3.7"
 pre-commit = ">=2"
 pytest = ">=7"
 
@@ -59,11 +58,12 @@ requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
 
 [tool.ruff]
-line-length = 100
-# isort, Warning, pycodestyle (see here: https://github.com/charliermarsh/ruff#supported-rules)
-select = ["I", "W", "E"]
+line-length = 120
 exclude = ["zod/eval/detection/_nuscenes_eval/*"]
 
-[tool.black]
-line-length = 100
-target-version = ["py38"]
+[tool.ruff.lint]
+select = [ # (see here: https://github.com/charliermarsh/ruff#supported-rules)
+    "I", # isort
+    "W", # warning
+    "E", # pycodestyle
+]
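In practice, the move from black at line-length 100 to ruff-format at line-length 120 mostly re-joins statements that black had wrapped, which is what the file diffs below show over and over. A minimal before/after sketch (hypothetical snippet, not taken from this repository):

# Before: wrapped to fit black's 100-column limit
message = (
    "Error! File already exists and is larger than expected. "
    "Please delete and try again."
)

# After: at line-length 120 ruff-format joins the wrapped lines onto one
message = "Error! File already exists and is larger than expected. " "Please delete and try again."

Note that the formatter keeps the two adjacent string literals rather than merging them into one, so the joined line still contains an implicit concatenation, as in the download.py diff below.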
1 change: 1 addition & 0 deletions zod/_zod_dataset.py
@@ -1,4 +1,5 @@
 """ZOD Frames module."""
+
 import json
 import os.path as osp
 from abc import ABC, abstractmethod
1 change: 1 addition & 0 deletions zod/anno/parser.py
@@ -1,4 +1,5 @@
 """Annotation parsers."""
+
 import json
 from dataclasses import dataclass
 from typing import Any, Dict, List
16 changes: 5 additions & 11 deletions zod/cli/download.py
@@ -1,4 +1,5 @@
 """This script is to be used to download the Zenseact Open Dataset."""
+
 import contextlib
 import os
 import os.path as osp
@@ -137,18 +138,15 @@ def _download(download_path: str, dbx: ResumableDropbox, info: DownloadExtractIn
     )
     if pbar.n > info.size:
         tqdm.write(
-            f"Error! File {download_path} already exists and is larger than expected. "
-            "Please delete and try again."
+            f"Error! File {download_path} already exists and is larger than expected. " "Please delete and try again."
         )
     if pbar.n > 0:
         # this means we are retrying or resuming a previously interrupted download
         tqdm.write(f"Resuming download of {download_path} from {current_size} bytes.")
     # Retry download if partial file exists (e.g. due to network error)
     while pbar.n < info.size:
         try:
-            _, response = dbx.sharing_get_shared_link_file(
-                url=info.url, path=info.file_path, start=pbar.n
-            )
+            _, response = dbx.sharing_get_shared_link_file(url=info.url, path=info.file_path, start=pbar.n)
             with open(download_path, "ab") as f:
                 with contextlib.closing(response):
                     for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
@@ -333,9 +331,7 @@ def _print_summary(download_settings, filter_settings, subset):
         print(" version: mini\n (other settings are ignored for mini)")
     else:
         print(filter_settings)
-    if subset == SubDataset.FRAMES and (
-        filter_settings.num_scans_before == filter_settings.num_scans_after == 0
-    ):
+    if subset == SubDataset.FRAMES and (filter_settings.num_scans_before == filter_settings.num_scans_after == 0):
         typer.secho(
             "Note! The current settings will only download the core lidar frames. "
             "If you need surrounding scans, set --num-scans-before and/or --num-scans-after.",
@@ -389,9 +385,7 @@ def download(
         False, help="Extract already downloaded archives", rich_help_panel=GEN
     ),
     parallel: bool = typer.Option(True, help="Download files in parallel", rich_help_panel=GEN),
-    max_workers: int = typer.Option(
-        None, help="Max number of workers for parallel downloads", rich_help_panel=GEN
-    ),
+    max_workers: int = typer.Option(None, help="Max number of workers for parallel downloads", rich_help_panel=GEN),
    no_confirm: bool = typer.Option(
        False,
        "-y",
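The loop above resumes interrupted downloads by appending to the partial file and re-requesting the shared link from the current byte offset (pbar.n). A generic sketch of the same pattern using plain HTTP range requests (an illustration built on the requests library; the repository's actual ResumableDropbox client is not shown in this diff):

import os

import requests


def resume_download(url: str, download_path: str, expected_size: int, chunk_size: int = 2**20) -> None:
    """Download url to download_path, resuming from any partial file already on disk."""
    start = os.path.getsize(download_path) if os.path.exists(download_path) else 0
    if start >= expected_size:
        return  # nothing left to do
    headers = {"Range": f"bytes={start}-"}  # ask the server for the remaining bytes only
    with requests.get(url, headers=headers, stream=True, timeout=60) as response:
        response.raise_for_status()
        with open(download_path, "ab") as f:  # append to the partial file
            for chunk in response.iter_content(chunk_size=chunk_size):
                f.write(chunk)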
30 changes: 9 additions & 21 deletions zod/cli/extract_tsr_patches.py
@@ -65,10 +65,8 @@ def extract_tsr_patches(
         resolve_path=True,
         help="Path to the output directory.",
     ),
     version: Version = typer.Option(..., help="Version of the dataset to use."),
-    padding_factor: Optional[float] = typer.Option(
-        None, help="Factor to multiply the padding with."
-    ),
+    padding_factor: Optional[float] = typer.Option(None, help="Factor to multiply the padding with."),
     padding_px_y: Optional[int] = typer.Option(None, help="Padding in y direction."),
     padding_px_x: Optional[int] = typer.Option(None, help="Padding in x direction."),
     num_workers: Optional[int] = typer.Option(None, help="Number of workers to use."),
@@ -83,11 +81,9 @@ def extract_tsr_patches(
     assert not (
         padding_factor is not None and (padding_px_x is not None or padding_px_y is not None)
     ), "Cannot specify both padding and padding_factor"
-    padding = (
-        (padding_px_x, padding_px_y)
-        if (padding_px_x is not None and padding_px_y is not None)
-        else None
-    )
+
+    padding = (padding_px_x, padding_px_y) if (padding_px_x is not None and padding_px_y is not None) else None
+
     settings = Settings(
         output_dir=output_dir,
         padding_factor=padding_factor,
@@ -114,9 +110,7 @@ def extract_tsr_patches(
     train_frame_ids = set(zod_frames.get_split(constants.TRAIN))
     val_frame_ids = set(zod_frames.get_split(constants.VAL))
 
-    train_frames = [
-        f for f in traffic_sign_frames if f["frame_id"].split("_")[0] in train_frame_ids
-    ]
+    train_frames = [f for f in traffic_sign_frames if f["frame_id"].split("_")[0] in train_frame_ids]
     val_frames = [f for f in traffic_sign_frames if f["frame_id"].split("_")[0] in val_frame_ids]
 
     # write it to a json file
@@ -132,18 +126,14 @@ def extract_tsr_patches(
     )
 
 
-def _process_frame(
-    frame: ZodFrame, settings: Settings, train_ids: Set[str]
-) -> List[Dict[str, Any]]:
+def _process_frame(frame: ZodFrame, settings: Settings, train_ids: Set[str]) -> List[Dict[str, Any]]:
     """Process a single frame."""
 
     # not all frames have traffic signs
     if constants.AnnotationProject.TRAFFIC_SIGNS not in frame.info.annotations:
         return []
 
-    traffic_signs: List[parser.TrafficSignAnnotation] = frame.get_annotation(
-        constants.AnnotationProject.TRAFFIC_SIGNS
-    )
+    traffic_signs: List[parser.TrafficSignAnnotation] = frame.get_annotation(constants.AnnotationProject.TRAFFIC_SIGNS)
 
     if len(traffic_signs) == 0:
         return []
@@ -196,9 +186,7 @@ def _process_frame(
             "center_y": float(center_y),
             "original_width": float(traffic_sign.bounding_box.dimension[0]),
             "original_height": float(traffic_sign.bounding_box.dimension[1]),
-            "annotation": {
-                key: val for key, val in traffic_sign.__dict__.items() if key != "bounding_box"
-            },
+            "annotation": {key: val for key, val in traffic_sign.__dict__.items() if key != "bounding_box"},
         }
     )
 
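The options above make padding_factor and the explicit pixel paddings mutually exclusive, which the assert enforces. A hedged sketch of how such a crop box could be padded (pad_box is a hypothetical helper for illustration, not code from this repository):

from typing import Optional, Tuple


def pad_box(
    x: float,
    y: float,
    w: float,
    h: float,
    padding_factor: Optional[float] = None,
    padding_px: Optional[Tuple[int, int]] = None,
) -> Tuple[float, float, float, float]:
    """Grow an (x, y, w, h) box either multiplicatively or by fixed pixels per axis."""
    if padding_factor is not None:
        pad_x, pad_y = w * (padding_factor - 1) / 2, h * (padding_factor - 1) / 2
    elif padding_px is not None:
        pad_x, pad_y = padding_px
    else:
        return x, y, w, h
    return x - pad_x, y - pad_y, w + 2 * pad_x, h + 2 * pad_y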
17 changes: 5 additions & 12 deletions zod/cli/generate_coco_json.py
@@ -18,9 +18,7 @@
 
 # Map classes to categories, starting from 1
 CATEGORY_NAME_TO_ID = {cls: i + 1 for i, cls in enumerate(OBJECT_CLASSES)}
-OPEN_DATASET_URL = (
-    "https://www.ai.se/en/data-factory/datasets/data-factory-datasets/zenseact-open-dataset"
-)
+OPEN_DATASET_URL = "https://www.ai.se/en/data-factory/datasets/data-factory-datasets/zenseact-open-dataset"
 
 
 def _convert_frame(
@@ -69,9 +67,7 @@ def generate_coco_json(
     """Generate COCO JSON file from the ZOD dataset."""
     assert split in ["train", "val"], f"Unknown split: {split}"
     frame_infos = [dataset[frame_id] for frame_id in dataset.get_split(split)]
-    _convert_frame_w_classes = partial(
-        _convert_frame, classes=classes, anonymization=anonymization, use_png=use_png
-    )
+    _convert_frame_w_classes = partial(_convert_frame, classes=classes, anonymization=anonymization, use_png=use_png)
     results = process_map(
         _convert_frame_w_classes,
         frame_infos,
@@ -127,10 +123,8 @@ def convert_to_coco(
         resolve_path=True,
         help="Path to the output directory.",
     ),
     version: Version = typer.Option(..., help="Version of the dataset to use."),
-    anonymization: Anonymization = typer.Option(
-        Anonymization.BLUR.value, help="Anonymization mode to use."
-    ),
+    anonymization: Anonymization = typer.Option(Anonymization.BLUR.value, help="Anonymization mode to use."),
     use_png: bool = typer.Option(False, help="Whether to use PNG images instead of JPG."),
     classes: List[str] = typer.Option(
         ["Vehicle", "Pedestrian", "VulnerableVehicle"], help="Classes to include in the dataset."
@@ -142,8 +136,7 @@ def convert_to_coco(
             typer.echo(f"ERROR: Invalid class: {cls}.")
             raise typer.Exit(1)
     typer.echo(
-        "Converting ZOD to COCO format. "
-        f"Version: {version}, anonymization: {anonymization}, classes: {classes}"
+        "Converting ZOD to COCO format. " f"Version: {version}, anonymization: {anonymization}, classes: {classes}"
     )
 
     zod_frames = ZodFrames(str(dataset_root), version.value)
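For orientation, generate_coco_json targets the standard COCO detection layout: a JSON document with images, annotations, and categories tables, with category ids starting from 1 as CATEGORY_NAME_TO_ID does above. A minimal schematic with illustrative values (the field set is the COCO standard; the concrete values and file name are made up, not the script's exact output):

CATEGORY_NAME_TO_ID = {cls: i + 1 for i, cls in enumerate(["Vehicle", "Pedestrian", "VulnerableVehicle"])}

coco = {
    "images": [{"id": 1, "file_name": "000000.jpg", "width": 1920, "height": 1080}],
    "annotations": [
        {
            "id": 1,
            "image_id": 1,
            "category_id": CATEGORY_NAME_TO_ID["Vehicle"],
            "bbox": [100.0, 200.0, 50.0, 40.0],  # x, y, width, height in pixels
            "area": 50.0 * 40.0,
            "iscrowd": 0,
        }
    ],
    "categories": [{"id": idx, "name": name} for name, idx in CATEGORY_NAME_TO_ID.items()],
}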
8 changes: 2 additions & 6 deletions zod/cli/verify.py
@@ -13,9 +13,7 @@
 app = typer.Typer(help="ZOD Download Verifyer", no_args_is_help=True)
 
 
-def _verify_info(
-    info: Information, separate_lidar: bool
-) -> Dict[str, Dict[str, Union[bool, List[bool]]]]:
+def _verify_info(info: Information, separate_lidar: bool) -> Dict[str, Dict[str, Union[bool, List[bool]]]]:
     """Verify the given infos."""
     stats = defaultdict(dict)
     stats["general"] = {
@@ -57,9 +55,7 @@ def _print_results(verified_infos):
     groups = sorted(set().union(*verified_infos))
     for group in groups:
         keys = sorted(set().union(*(v[group] for v in verified_infos)))
-        stats = {
-            k: [d[group][k] for d in verified_infos if group in d and k in d[group]] for k in keys
-        }
+        stats = {k: [d[group][k] for d in verified_infos if group in d and k in d[group]] for k in keys}
         print(f"\n\n{group.upper():^45}\n{'-' * 55}")
         print(f"{'Data':<20} {'Downloaded (%)':<15} {'Expected (count)':<20}")
         print("-" * 55)
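The comprehension above collects, per group and key, the boolean verification results across all infos so they can be reported as download percentages. A tiny illustration on toy data (the group and key names here are synthetic, mirroring only the shapes used above):

verified_infos = [
    {"general": {"calibration": True, "oxts": True}},
    {"general": {"calibration": False}},
]
groups = sorted(set().union(*verified_infos))
for group in groups:
    keys = sorted(set().union(*(v[group] for v in verified_infos)))
    stats = {k: [d[group][k] for d in verified_infos if group in d and k in d[group]] for k in keys}
    for k, values in stats.items():
        print(f"{k}: {sum(values) / len(values):.0%} of {len(values)}")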
16 changes: 4 additions & 12 deletions zod/cli/visualize_lidar.py
@@ -40,18 +40,14 @@ def _visualize(data: LidarData, boxes: List[Box3D] = None):
     if boxes:
         o3d_boxes = []
         for box in boxes:
-            o3d_box = o3d.geometry.OrientedBoundingBox.create_from_points(
-                o3d.utility.Vector3dVector(box.corners)
-            )
+            o3d_box = o3d.geometry.OrientedBoundingBox.create_from_points(o3d.utility.Vector3dVector(box.corners))
             o3d_box.color = (0.98, 0.63, 0.01)
             o3d_boxes.append(o3d_box)
         o3d.visualization.draw_geometries(
             [pcd, *o3d_boxes, o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)]
         )
     else:
-        o3d.visualization.draw_geometries(
-            [pcd, o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)]
-        )
+        o3d.visualization.draw_geometries([pcd, o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)])
 
 
 @app.command(no_args_is_help=True)
@@ -61,9 +57,7 @@ def frames(
     frame_id: int = typer.Option(..., help="Frame id to visualize"),
     num_before: int = typer.Option(0, help="Number of frames before the given frame id"),
     num_after: int = typer.Option(0, help="Number of frames after the given frame id"),
-    with_bounding_boxes: bool = typer.Option(
-        False, help="if bounding boxes of center-frame are to be rendered"
-    ),
+    with_bounding_boxes: bool = typer.Option(False, help="if bounding boxes of center-frame are to be rendered"),
 ):
     """Visualize the lidar data for a given frame id."""
     zod_frames = ZodFrames(dataset_root=dataset_root, version=version)
@@ -96,9 +90,7 @@ def sequences(
     data = frame.get_aggregated_lidar(start=start, end=end)
     if downsampling > 1:
         typer.echo(f"Will subsample the point-cloud with a factor {downsampling}")
-        indexes = np.random.choice(
-            data.points.shape[0], size=data.points.shape[0] // downsampling, replace=False
-        )
+        indexes = np.random.choice(data.points.shape[0], size=data.points.shape[0] // downsampling, replace=False)
         data.points = data.points[indexes]
         data.intensity = data.intensity[indexes]
         data.timestamps = data.timestamps[indexes]
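The sequences command above thins the aggregated cloud with a uniform random subset so the Open3D viewer stays responsive. A self-contained sketch of the same idea on synthetic points (assumed Open3D usage; the repository's LidarData wrapper is not part of this diff):

import numpy as np
import open3d as o3d

points = np.random.rand(1_000_000, 3)  # stand-in for a large aggregated lidar cloud
downsampling = 10

# Keep every point with equal probability, without replacement
indexes = np.random.choice(points.shape[0], size=points.shape[0] // downsampling, replace=False)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points[indexes])
o3d.visualization.draw_geometries([pcd])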
5 changes: 2 additions & 3 deletions zod/data_classes/box.py
@@ -1,4 +1,5 @@
 """ZOD Object detection containers."""
+
 from __future__ import annotations
 
 from dataclasses import dataclass
@@ -315,9 +316,7 @@ def get_3d_frustum(
         depth = np.array([max_depth] * 4)
 
         # Project the 2d corners to the max_depth using the calibration
-        frustum = unproject_2d_to_3d_kannala(
-            corners, camera_calib.intrinsics, camera_calib.undistortion, depth
-        )
+        frustum = unproject_2d_to_3d_kannala(corners, camera_calib.intrinsics, camera_calib.undistortion, depth)
 
         if min_depth == 0.0:
             frustum = np.concatenate((np.zeros((4, 3)), frustum), axis=0)
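With min_depth == 0 the frustum degenerates at the camera origin, so four zero points are prepended to the four unprojected far-plane corners, giving the frustum's eight corners. A toy illustration of that concatenation (synthetic far-plane corners, not the Kannala unprojection itself):

import numpy as np

far_corners = np.array([[1, 1, 5], [-1, 1, 5], [-1, -1, 5], [1, -1, 5]], dtype=float)
frustum = np.concatenate((np.zeros((4, 3)), far_corners), axis=0)
print(frustum.shape)  # (8, 3): four points at the camera origin, four on the far plane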
5 changes: 2 additions & 3 deletions zod/data_classes/calibration.py
@@ -1,4 +1,5 @@
 """Calibration dataclasses."""
+
 from __future__ import annotations
 
 import json
@@ -35,9 +36,7 @@ class Calibration:
     @classmethod
     def from_dict(cls, calib_dict: Dict[str, Any]) -> Calibration:
         lidars = {
-            Lidar.VELODYNE: LidarCalibration(
-                extrinsics=Pose(np.array(calib_dict["FC"]["lidar_extrinsics"]))
-            ),
+            Lidar.VELODYNE: LidarCalibration(extrinsics=Pose(np.array(calib_dict["FC"]["lidar_extrinsics"]))),
         }
         cameras = {
             Camera.FRONT: CameraCalibration(
24 changes: 6 additions & 18 deletions zod/data_classes/ego_motion.py
@@ -40,9 +40,7 @@ def get_poses(self, target_ts: Union[np.ndarray, float]) -> np.ndarray:
         if np.isin(target_ts, self.timestamps).all():
             return self.poses[self.timestamps.searchsorted(target_ts)]
 
-        closest_idxs = self.timestamps.searchsorted(
-            target_ts, side="right", sorter=self.timestamps.argsort()
-        )
+        closest_idxs = self.timestamps.searchsorted(target_ts, side="right", sorter=self.timestamps.argsort())
 
         # if the target timestamp is exactly the same as the largest timestamp
         # then the searchsorted will return the length of the array, which is
@@ -54,9 +52,7 @@ def get_poses(self, target_ts: Union[np.ndarray, float]) -> np.ndarray:
         time_diffs = target_ts - self.timestamps[closest_idxs - 1]
         total_times = self.timestamps[closest_idxs] - self.timestamps[closest_idxs - 1]
         fractions = time_diffs / total_times
-        return interpolate_transforms(
-            self.poses[closest_idxs - 1], self.poses[closest_idxs], fractions
-        )
+        return interpolate_transforms(self.poses[closest_idxs - 1], self.poses[closest_idxs], fractions)
 
     def interpolate(self, timestamps: np.ndarray) -> EgoMotion:
         """Interpolate ego motion to find ego motion for each target timestamp.
@@ -86,15 +82,9 @@ def from_oxts_path(cls, file_path: str) -> EgoMotion:
         with h5py.File(file_path, "r") as file:
             return cls(
                 poses=file["poses"][()],
-                accelerations=np.stack(
-                    [file["accelerationX"], file["accelerationY"], file["accelerationZ"]], axis=1
-                ),
-                velocities=np.stack(
-                    [file["velForward"][()], file["velLateral"][()], -file["velDown"][()]], axis=1
-                ),
-                angular_rates=np.stack(
-                    [file["angularRateX"], file["angularRateY"], file["angularRateZ"]], axis=1
-                ),
+                accelerations=np.stack([file["accelerationX"], file["accelerationY"], file["accelerationZ"]], axis=1),
+                velocities=np.stack([file["velForward"][()], file["velLateral"][()], -file["velDown"][()]], axis=1),
+                angular_rates=np.stack([file["angularRateX"], file["angularRateY"], file["angularRateZ"]], axis=1),
                 timestamps=OXTS_TIMESTAMP_OFFSET + file["timestamp"][()] + file["leapSeconds"][()],
                 origin_lat_lon=(file["posLat"][0], file["posLon"][0]),
             )
@@ -157,9 +147,7 @@ def interpolate_transforms(transform_1: np.ndarray, transform_2: np.ndarray, fra
     return transform
 
 
-def interpolate_vectors(
-    values: np.ndarray, source_timestamps: np.ndarray, target_timestamps: np.ndarray
-) -> np.ndarray:
+def interpolate_vectors(values: np.ndarray, source_timestamps: np.ndarray, target_timestamps: np.ndarray) -> np.ndarray:
     """Interpolate vectors to find vector for each (sorted) target timestamp.
 
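get_poses above works by finding, for each query timestamp, the bracketing pose pair with searchsorted and blending the two poses by the elapsed-time fraction; interpolate_vectors is the per-component analogue (essentially what np.interp computes). A small self-contained illustration of the fraction computation on synthetic timestamps, mirroring the variable names above:

import numpy as np

timestamps = np.array([0.0, 1.0, 2.0])  # synthetic, already-sorted pose timestamps
target_ts = np.array([0.25, 1.5, 2.0])

closest_idxs = timestamps.searchsorted(target_ts, side="right")
closest_idxs = np.minimum(closest_idxs, len(timestamps) - 1)  # clamp hits on the largest timestamp
time_diffs = target_ts - timestamps[closest_idxs - 1]
total_times = timestamps[closest_idxs] - timestamps[closest_idxs - 1]
fractions = time_diffs / total_times  # 0 at the earlier pose, 1 at the later
print(fractions)  # [0.25 0.5  1.  ]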