From 3d41b703f326c833eabf763eaefd0d6da1361c41 Mon Sep 17 00:00:00 2001
From: Thibault Neveu
Date: Tue, 14 Mar 2023 16:47:49 +0100
Subject: [PATCH] Revert "Fix unit test & setup.py"

---
 Dockerfile                                    | 17 +++----
 README.md                                     |  6 +--
 aloscene/depth.py                             |  2 +-
 aloscene/tensors/spatial_augmented_tensor.py  |  8 ++--
 .../cuda_op/sort_vertices.egg-info/PKG-INFO   |  8 ++--
 ...v_requirements.txt => dev_requirements.txt |  0
 ...1.13.1.txt => requirements-torch1.13.1.txt |  0
 requirements/requirements-torch2.1.txt        | 18 -------
 setup.py                                      | 47 ++++++++-----------
 unittest/test_augmented_tensor.py             |  5 --
 unittest/test_oriented_boxes_2d.py            |  2 +-
 unittest/test_points2d.py                     |  7 +--
 unittest/test_projections.py                  |  7 +--
 13 files changed, 45 insertions(+), 82 deletions(-)
 rename requirements/dev_requirements.txt => dev_requirements.txt (100%)
 rename requirements/requirements-torch1.13.1.txt => requirements-torch1.13.1.txt (100%)
 delete mode 100644 requirements/requirements-torch2.1.txt

diff --git a/Dockerfile b/Dockerfile
index a494cd57..72e148d5 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,10 +4,10 @@ FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
 #FROM nvidia/cuda:11.6.0-cudnn8-devel-ubuntu20.04
 
 ARG py=3.9
-ARG pytorch=2.1.0.dev20230313+cu117
-ARG torchvision=0.15.0.dev20230313+cu117
-ARG torchaudio=2.0.0.dev20230313+cu117
-ARG pytorch_lightning=1.9.3
+ARG pytorch=1.13.1
+ARG torchvision=0.14.1
+ARG torchaudio=0.13.1
+ARG pytorch_lightning=1.9.0
 ARG pycyda=11.7
 
 ENV TZ=Europe/Paris
@@ -30,11 +30,8 @@ ENV HOME /workspace
 WORKDIR /workspace
 
 # Pytorch & pytorch litning
-#RUN conda install py pytorch-cuda=${pycuda} -c pytorch -c nvidia
-RUN pip install --pre torch==${pytorch} torchvision==${torchvision} torchaudio==${torchaudio} --index-url https://download.pytorch.org/whl/nightly/cu117
+RUN conda install pytorch==${pytorch} torchvision==${torchvision} torchaudio==${torchaudio} pytorch-cuda=${pycuda} -c pytorch -c nvidia
 RUN pip install pytorch_lightning==${pytorch_lightning}
 
-COPY requirements/requirements-torch2.1.txt /install/requirements-torch2.1.txt
-RUN pip install -r /install/requirements-torch2.1.txt
-COPY ./aloscene/utils /install/utils
-RUN cd /install/utils/rotated_iou/cuda_op/; python setup.py install --user
\ No newline at end of file
+COPY requirements-torch1.13.1.txt /install/requirements-torch1.13.1.txt
+RUN pip install -r /install/requirements-torch1.13.1.txt
diff --git a/README.md b/README.md
index 5a04f684..6e295777 100644
--- a/README.md
+++ b/README.md
@@ -78,17 +78,17 @@ training pipelines with **augmented tensors**.
 
 ### Docker install
 
 ```
-docker build -t aloception-oss:cuda-11.7-pytorch2.1.0-lightning1.9.3 .
+docker build -t aloception-oss:cuda-11.3.1-pytorch1.13.1-lightning1.9.0 .
 ```
 
 ```
-docker run --gpus all -it -v /YOUR/WORKSPACE/:/workspace --privileged -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix aloception-oss:cuda-11.7-pytorch2.1.0-lightning1.9.3
+docker run --gpus all -it -v /YOUR/WORKSPACE/:/workspace --privileged -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix aloception-oss:cuda-11.3.1-pytorch1.13.1-lightning1.9.0
 ```
 
 Or without building the image
 
 ```
-docker run --gpus all -it -v /YOUR/WORKSPACE/:/workspace --privileged -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix visualbehaviorofficial/aloception-oss:cuda-11.7-pytorch2.1.0-lightning1.9.3
+docker run --gpus all -it -v /YOUR/WORKSPACE/:/workspace --privileged -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix visualbehaviorofficial/aloception-oss:cuda-11.3.1-pytorch1.13.1-lightning1.9.0
 ```
 
diff --git a/aloscene/depth.py b/aloscene/depth.py
index f841129d..ab52592d 100644
--- a/aloscene/depth.py
+++ b/aloscene/depth.py
@@ -252,7 +252,7 @@ def as_points3d(
         target_names = tuple([n for n in self.names if n not in ("C", "H", "W")] + ["N", None])
         if points is None:
             y_points, x_points = torch.meshgrid(
-                torch.arange(self.H, device=self.device), torch.arange(self.W, device=self.device), indexing="ij"
+                torch.arange(self.H, device=self.device), torch.arange(self.W, device=self.device)
             )
             # Append batch & temporal dimensions
             for _ in range(len(target_shape[:-1])):
diff --git a/aloscene/tensors/spatial_augmented_tensor.py b/aloscene/tensors/spatial_augmented_tensor.py
index 7be14920..3b8ede0e 100644
--- a/aloscene/tensors/spatial_augmented_tensor.py
+++ b/aloscene/tensors/spatial_augmented_tensor.py
@@ -526,7 +526,7 @@ def _resize(self, size, interpolation=InterpolationMode.BILINEAR, **kwargs):
         if ("N" in self.names and self.size("N") == 0) or ("C" in self.names and self.size("C") == 0):
             shapes = list(self.shape)[:-2] + [h, w]
             return self.rename(None).view(shapes).reset_names()
-        return F.resize(self.rename(None), (h, w), interpolation=interpolation, antialias=True).reset_names()
+        return F.resize(self.rename(None), (h, w), interpolation=interpolation).reset_names()
 
     def _rotate(self, angle, center=None,**kwargs):
         """Rotate SpatialAugmentedTensor, but not its labels
@@ -548,7 +548,7 @@ def _rotate(self, angle, center=None,**kwargs):
         ), "rotation is not possible on an empty tensor"
         return F.rotate(self.rename(None), angle,center=center).reset_names()
 
-    def _crop(self, H_crop: tuple, W_crop: tuple, warn_non_integer=True, **kwargs):
+    def _crop(self, H_crop: tuple, W_crop: tuple, **kwargs):
         """Crop the SpatialAugmentedTensor
 
         Parameters
@@ -557,8 +557,6 @@ def _crop(self, H_crop: tuple, W_crop: tuple, **kwargs):
             (start, end) between 0 and 1
         W_crop: tuple
             (start, end) between 0 and 1
-        warn_non_integer: bool
-            If True, warn if the crop is not integer
 
         Returns
         -------
@@ -566,7 +564,7 @@ def _crop(self, H_crop: tuple, W_crop: tuple, **kwargs):
             cropped SpatialAugmentedTensor
         """
 
-        H_crop, W_crop = self._relative_to_absolute_hs_ws(H_crop, W_crop, assert_integer=False, warn_non_integer=warn_non_integer)
+        H_crop, W_crop = self._relative_to_absolute_hs_ws(H_crop, W_crop, assert_integer=False, warn_non_integer=True)
         hmin, hmax = H_crop
         wmin, wmax = W_crop
         slices = self.get_slices({"H": slice(hmin, hmax), "W": slice(wmin, wmax)})
diff --git a/aloscene/utils/rotated_iou/cuda_op/sort_vertices.egg-info/PKG-INFO b/aloscene/utils/rotated_iou/cuda_op/sort_vertices.egg-info/PKG-INFO
index 31aeedad..c61b97bd 100644
--- a/aloscene/utils/rotated_iou/cuda_op/sort_vertices.egg-info/PKG-INFO
+++ b/aloscene/utils/rotated_iou/cuda_op/sort_vertices.egg-info/PKG-INFO
@@ -1,10 +1,10 @@
-Metadata-Version: 2.1
+Metadata-Version: 1.0
 Name: sort-vertices
 Version: 0.0.0
 Summary: UNKNOWN
 Home-page: UNKNOWN
+Author: UNKNOWN
+Author-email: UNKNOWN
 License: UNKNOWN
+Description: UNKNOWN
 Platform: UNKNOWN
-
-UNKNOWN
-
diff --git a/requirements/dev_requirements.txt b/dev_requirements.txt
similarity index 100%
rename from requirements/dev_requirements.txt
rename to dev_requirements.txt
diff --git a/requirements/requirements-torch1.13.1.txt b/requirements-torch1.13.1.txt
similarity index 100%
rename from requirements/requirements-torch1.13.1.txt
rename to requirements-torch1.13.1.txt
diff --git a/requirements/requirements-torch2.1.txt b/requirements/requirements-torch2.1.txt
deleted file mode 100644
index df7d9615..00000000
--- a/requirements/requirements-torch2.1.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-pycocotools==2.0.2
-PyYAML==5.4.1
-chardet==4.0.0
-idna==2.10
-scipy==1.10.0
-more_itertools==8.8.0
-requests==2.25.1
-opencv-python==4.7.0.68
-python-dateutil==2.8.2
-urllib3==1.26.6
-protobuf==4.21.12
-wandb==0.13.9
-tqdm==4.62.3
-captum==0.4.0
-setuptools==59.5.0
-numpy==1.23.5
-pytest==7.2.2
-Image==1.5.33
\ No newline at end of file
diff --git a/setup.py b/setup.py
index b08da288..551f474c 100644
--- a/setup.py
+++ b/setup.py
@@ -3,39 +3,32 @@
 setup(
     name='aloception',
     author='Visual Behavior',
-    version='0.5.1',
+    version='0.3.0',
     description='Aloception is a set of package for computer vision: aloscene, alodataset, alonet.',
     packages=find_packages(include=['aloscene', 'alodataset', 'alonet']),
    url='https://visualbehavior.ai/',
     download_url='https://github.com/Visual-Behavior/aloception-oss',
     install_requires=[
-        'pycocotools==2.0.2',
-        'PyYAML==5.4.1',
-        'chardet==4.0.0',
-        'idna==2.10',
-
-        'scipy==1.10.0',
-
-        'more_itertools==8.8.0',
-        'requests==2.25.1',
-        'opencv-python==4.7.0.68',
-
-        'python-dateutil==2.8.2',
-        'urllib3==1.26.6',
-
-        'protobuf==4.21.12',
-        'wandb==0.13.9',
-
+        'matplotlib==3.5.3',
+        'more-itertools==8.8.0',  # required for alodataset waymo
+        'onnx==1.12.0',
+        'onnx_graphsurgeon==0.0.1.dev5',
+        'onnxsim==0.4.8',
+        'opencv-python==4.5.3.56',
+        'Pillow==9.2.0',
+        'pycocotools==2.0.2',  # required for alodataset coco
+        'pytorch_lightning==1.4.1',
+        'pytorch_quantization==0.0.1.dev5',
+        'Requests==2.28.1',
+        'scipy==1.4.1',  # required for alonet/detr/matcher
+        'setuptools==63.4.1',
+        'tensorflow==2.10.0',  # required for alodataset/prepare/waymo_converter
+        'tensorrt==0.0.1.dev5',
+        'torchvision==0.13.1',
         'tqdm==4.62.3',
-        'captum==0.4.0',
-
-        'setuptools==59.5.0',
-
-        'numpy==1.23.5',
-
-        'pytest==7.2.2',
-        'Image==1.5.33'
-    ],
+        'ts==0.5.1',
+        'wandb==0.12.2',
+        'waymo_open_dataset==1.0.1'],
     setup_requires=['numpy', 'torch', 'nvidia-pyindex', 'pycuda'],
     license_files=['LICENSE'],
     keywords=['artificial intelligence', 'computer vision'],
diff --git a/unittest/test_augmented_tensor.py b/unittest/test_augmented_tensor.py
index 821f08db..d35fd132 100644
--- a/unittest/test_augmented_tensor.py
+++ b/unittest/test_augmented_tensor.py
@@ -64,8 +64,3 @@ def test_batch_list_intersection_unmergeable_child():
     assert len(f.flow) == 2
     assert f.flow[0].shape == (2, 10, 10)
     assert f.flow[1] is None
-
-if __name__ == "__main__":
-    test_batch_list_intersection_property()
-    test_batch_list_intersection_mergeable_child()
-    test_batch_list_intersection_unmergeable_child()
\ No newline at end of file
diff --git a/unittest/test_oriented_boxes_2d.py b/unittest/test_oriented_boxes_2d.py
index 273c7010..b97048e1 100644
--- a/unittest/test_oriented_boxes_2d.py
+++ b/unittest/test_oriented_boxes_2d.py
@@ -3,7 +3,7 @@
 import aloscene
 from aloscene import OrientedBoxes2D
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+device = torch.device("cpu")
 
 
 def tensor_equal(tensor1, tensor2, threshold=1e-4):
diff --git a/unittest/test_points2d.py b/unittest/test_points2d.py
index 110a2552..fc4a078c 100644
--- a/unittest/test_points2d.py
+++ b/unittest/test_points2d.py
@@ -9,15 +9,12 @@ def test_crop_abs():
     image = np.zeros((3, 843, 1500))
     corners = [[298, 105], [1250, 105], [298, 705], [1250, 705]]
     frame = aloscene.Frame(image)
-    labels = aloscene.Labels([0, 1, 2, 3], labels_names=["corners0", "corners1", "corners2", "corners3"])
+    labels = aloscene.Labels([0, 0, 0, 0], labels_names=["corners"])
     corners = aloscene.Points2D(
         corners, points_format="xy", frame_size=(frame.H, frame.W), absolute=True, labels=labels
     )
-    frame.append_points2d(corners)
-
-    frame = frame.crop(H_crop=(0.0, 0.5), W_crop=(0.0, 0.5), warn_non_integer=False)
-
+    frame = frame.crop(H_crop=(0.0, 0.5), W_crop=(0.0, 0.5))
     assert torch.allclose(frame.points2d[0].as_tensor(), corners[0].as_tensor())
     assert np.allclose(frame.points2d.frame_size[0], frame.HW[0])
     assert np.allclose(frame.points2d.frame_size[1], frame.HW[1])
 
diff --git a/unittest/test_projections.py b/unittest/test_projections.py
index a3143fec..cbe1a100 100644
--- a/unittest/test_projections.py
+++ b/unittest/test_projections.py
@@ -16,7 +16,8 @@ def _test_disp_depth_points3d(depth, height, width, resize=True):
     )
 
     d = depth.resize((height * 2, width * 2))
-    v = depth.resize((height * 2, width * 2)).as_disp()#.resize((height, width)).as_depth()
+
+    v = depth.resize((height * 2, width * 2)).as_disp().resize((height, width)).as_depth()
 
     assert torch.allclose(
         depth.as_tensor(),
@@ -122,5 +123,5 @@ def test_disp_depth_points3d_projection4():
 
 if __name__ == "__main__":
     test_disp_depth_points3d_projection1()
-    #test_disp_depth_points3d_projection2()
-    #test_disp_depth_points3d_projection4()
+    test_disp_depth_points3d_projection2()
+    test_disp_depth_points3d_projection4()