diff --git a/.isort.cfg b/.isort.cfg new file mode 100644 index 0000000..bf7b9fb --- /dev/null +++ b/.isort.cfg @@ -0,0 +1,2 @@ +[settings] +profile=black \ No newline at end of file diff --git a/README.md b/README.md index afca3f7..4cffcd7 100644 --- a/README.md +++ b/README.md @@ -54,10 +54,10 @@ docker pull sinzlab/pytorch:v3.9-torch1.9.0-cuda11.1-dj0.12.7 5. You can now open JupyterLab in your browser at [`http://localhost:10101`](http://localhost:10101). #### Available Models -| Model Name | description | Artifact path | Import Code | -| --- |---------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------|----------------------------------| -| cGNF Human 3.6m | Model trained on the Human 3.6M dataset with MPII input keypoints. | ```ppierzc/cgnf/cgnf_human36m:best``` | ```from propose.models.flows import CondGraphFlow``` | - | HRNet | Instance of the [official](https://github.com/leoxiaobin/deep-high-resolution-net.pytorch) HRNet model trained on the MPII dataset with w32 and 256x256 | ```ppierzc/cgnf/hrnet:v0``` | ```from propose.models.detectors import HRNet``` | +| Model Name | description | Artifact path | Import Code | +| --- |---------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------|----------------------------------| +| cGNF Human 3.6m | Model trained on the Human 3.6M dataset with MPII input keypoints. | ```ppierzc/propose_human36m/mpii-prod:best``` | ```from propose.models.flows import CondGraphFlow``` | + | HRNet | Instance of the [official](https://github.com/leoxiaobin/deep-high-resolution-net.pytorch) HRNet model trained on the MPII dataset with w32 and 256x256 | ```ppierzc/cgnf/hrnet:v0``` | ```from propose.models.detectors import HRNet``` | ### Run Tests To run the tests, from the root directory call: diff --git a/notebooks/demo/demo.ipynb b/notebooks/demo/demo.ipynb index cf3f40b..5593e3c 100644 --- a/notebooks/demo/demo.ipynb +++ b/notebooks/demo/demo.ipynb @@ -30,8 +30,8 @@ "execution_count": 3, "outputs": [], "source": [ - "mocap_path = f'../../data/rat7m/mocap/mocap-s4-d1.mat'\n", - "vid_path = f'../../data/rat7m/movies/s4-d1/s4-d1-camera4-0.mp4'\n", + "mocap_path = f\"../../data/rat7m/mocap/mocap-s4-d1.mat\"\n", + "vid_path = f\"../../data/rat7m/movies/s4-d1/s4-d1-camera4-0.mp4\"\n", "\n", "vid = imageio.get_reader(vid_path)" ], @@ -77,7 +77,7 @@ "\n", "pose = pose[mask]\n", "\n", - "np.array(pose._edge('HeadF', 'HeadB'))" + "np.array(pose._edge(\"HeadF\", \"HeadB\"))" ], "metadata": { "collapsed": false, @@ -103,9 +103,9 @@ ], "source": [ "fig = plt.figure(figsize=(10, 10))\n", - "ax1 = fig.add_subplot(1, 1, 1, projection='3d')\n", + "ax1 = fig.add_subplot(1, 1, 1, projection=\"3d\")\n", "ax1.get_proj = lambda: np.dot(Axes3D.get_proj(ax1), np.diag([1, 1, 0.75, 1]))\n", - "ax1.view_init(30, 30)\n", + "ax1.view_init(30, 30)\n", "ax1.set_xlim(-400, -100)\n", "ax1.set_ylim(-300, 0)\n", "ax1.set_zlim(0, 100)\n", @@ -136,7 +136,7 @@ } ], "source": [ - "camera = cameras['Camera4']\n", + "camera = cameras[\"Camera4\"]\n", "pose2D = Rat7mPose(camera.proj2D(pose))\n", "\n", "frame_idx = camera.frames.squeeze()[mask][0]\n", @@ -169,7 +169,7 @@ ], "source": [ "cameras = load_cameras(mocap_path)\n", - "camera = cameras['Camera4']\n", + "camera = cameras[\"Camera4\"]\n", "camera.frames = 
camera.frames.squeeze()[mask]\n", "pose_idx = 0\n", "\n", @@ -183,16 +183,16 @@ "\n", "fig = plt.figure(figsize=(20, 10))\n", "\n", - "ax1 = fig.add_subplot(1, 2, 1, projection='3d')\n", + "ax1 = fig.add_subplot(1, 2, 1, projection=\"3d\")\n", "ax1.get_proj = lambda: np.dot(Axes3D.get_proj(ax1), np.diag([1, 1, 0.75, 1]))\n", - "ax1.view_init(30, 30)\n", + "ax1.view_init(30, 30)\n", "ax1.set_xlim(-400, -100)\n", "ax1.set_ylim(-300, 0)\n", "ax1.set_zlim(0, 100)\n", "\n", "ax2 = fig.add_subplot(1, 2, 2)\n", - "ax2.set_title('Camera 4')\n", - "plt.axis('off')\n", + "ax2.set_title(\"Camera 4\")\n", + "plt.axis(\"off\")\n", "\n", "img = ax2.imshow(im)\n", "animate1 = pose.animate(ax1)\n", @@ -200,6 +200,7 @@ "\n", "plt.close(fig)\n", "\n", + "\n", "def animate(i):\n", " frame_idx = camera.frames.squeeze()[pose_idx + i]\n", " im = vid.get_data(frame_idx)\n", @@ -209,14 +210,17 @@ " animate1(i)\n", " animate2(i)\n", "\n", + "\n", "ani = animation.FuncAnimation(fig, animate, frames=100)\n", "\n", - "Writer = animation.writers['ffmpeg']\n", - "writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=-1)\n", + "Writer = animation.writers[\"ffmpeg\"]\n", + "writer = Writer(fps=30, metadata=dict(artist=\"Me\"), bitrate=-1)\n", "\n", "pbar = tqdm(total=100, position=0)\n", "\n", - "ani.save('walk_cam_4_sub.mp4', writer=writer, progress_callback=lambda i, n: pbar.update(1))" + "ani.save(\n", + " \"walk_cam_4_sub.mp4\", writer=writer, progress_callback=lambda i, n: pbar.update(1)\n", + ")" ], "metadata": { "collapsed": false, diff --git a/notebooks/demo/load_human36m.ipynb b/notebooks/demo/load_human36m.ipynb index 9e5bb0a..8738abd 100644 --- a/notebooks/demo/load_human36m.ipynb +++ b/notebooks/demo/load_human36m.ipynb @@ -42,28 +42,28 @@ "path = \"/Users/paulpierzchlewicz/PycharmProjects/propose/data/human36m/Directions.60457274.cdf\"\n", "poses = load_poses(path)\n", "\n", - "poses /= poses.std() # Normalize poses\n", + "poses /= poses.std() # Normalize poses\n", "\n", "poses = Human36mPose(poses)\n", "\n", "pose = poses[200]\n", "\n", - "plt.style.use('default')\n", + "plt.style.use(\"default\")\n", "fig = plt.figure()\n", - "ax = fig.add_subplot(111, projection='3d')\n", - "ax.view_init(elev=15., azim=120)\n", - "poses[395].plot(ax=ax, alpha=.1)\n", - "poses[396].plot(ax=ax, alpha=.2)\n", - "poses[397].plot(ax=ax, alpha=.3)\n", - "poses[398].plot(ax=ax, alpha=.4)\n", - "poses[399].plot(ax=ax, alpha=.5)\n", + "ax = fig.add_subplot(111, projection=\"3d\")\n", + "ax.view_init(elev=15.0, azim=120)\n", + "poses[395].plot(ax=ax, alpha=0.1)\n", + "poses[396].plot(ax=ax, alpha=0.2)\n", + "poses[397].plot(ax=ax, alpha=0.3)\n", + "poses[398].plot(ax=ax, alpha=0.4)\n", + "poses[399].plot(ax=ax, alpha=0.5)\n", "poses[400].plot(ax=ax, alpha=1)\n", "\n", "ax.set_xlim(2, -2)\n", "ax.set_ylim(2, -2)\n", "ax.set_zlim(-2, 2)\n", "\n", - "ax.xaxis.pane.fill = False # Left pane\n", + "ax.xaxis.pane.fill = False # Left pane\n", "ax.yaxis.pane.fill = False\n", "ax.zaxis.pane.fill = False\n", "ax.grid(False)\n", @@ -88,7 +88,7 @@ "ax.set_yticks([])\n", "ax.set_zticks([])\n", "\n", - "plt.savefig('./human36m_pose.png', dpi=300)\n", + "plt.savefig(\"./human36m_pose.png\", dpi=300)\n", "\n", "plt.show()" ], diff --git a/notebooks/demo/load_rat7m_dataset_demo.ipynb b/notebooks/demo/load_rat7m_dataset_demo.ipynb index 6e58252..00e8a05 100644 --- a/notebooks/demo/load_rat7m_dataset_demo.ipynb +++ b/notebooks/demo/load_rat7m_dataset_demo.ipynb @@ -46,8 +46,8 @@ "execution_count": 7, "outputs": [], "source": [ - "dirname = 
'/Users/paulpierzchlewicz/PycharmProjects/propose/data/rat7m' # Choose this such that it points to your dataset\n", - "data_key = 's4-d1'" + "dirname = \"/Users/paulpierzchlewicz/PycharmProjects/propose/data/rat7m\" # Choose this such that it points to your dataset\n", + "data_key = \"s4-d1\"" ], "metadata": { "collapsed": false, @@ -61,17 +61,21 @@ "execution_count": 18, "outputs": [], "source": [ - "dataset = Rat7mDataset(dirname=dirname, data_key=data_key, transforms=[\n", - " tr.SwitchArmsElbows(),\n", - " tr.CropImageToPose(),\n", - " tr.RotatePoseToCamera(),\n", - " tr.CenterPose(),\n", - " tr.ScalePose(scale=0.03),\n", - " ScaleInputs(scale=0.1, multichannel=True, anti_aliasing=True),\n", - " tr.NormaliseImageScale(),\n", - " tr.ToGraph(),\n", - " ToTensor()\n", - "])" + "dataset = Rat7mDataset(\n", + " dirname=dirname,\n", + " data_key=data_key,\n", + " transforms=[\n", + " tr.SwitchArmsElbows(),\n", + " tr.CropImageToPose(),\n", + " tr.RotatePoseToCamera(),\n", + " tr.CenterPose(),\n", + " tr.ScalePose(scale=0.03),\n", + " ScaleInputs(scale=0.1, multichannel=True, anti_aliasing=True),\n", + " tr.NormaliseImageScale(),\n", + " tr.ToGraph(),\n", + " ToTensor(),\n", + " ],\n", + ")" ], "metadata": { "collapsed": false, @@ -103,25 +107,25 @@ "pose = Rat7mPose(pose_matrix.numpy())\n", "image = res.image\n", "\n", - "plt.style.use('default')\n", + "plt.style.use(\"default\")\n", "fig = plt.figure(figsize=(20, 10))\n", "ax1 = fig.add_subplot(1, 2, 1)\n", "\n", "ax1.imshow(image)\n", "\n", - "ax2 = fig.add_subplot(1, 2, 2, projection='3d')\n", + "ax2 = fig.add_subplot(1, 2, 2, projection=\"3d\")\n", "ax2.get_proj = lambda: np.dot(Axes3D.get_proj(ax2), np.diag([1, 1, 0.75, 1]))\n", - "ax2.view_init(45, 90)\n", + "ax2.view_init(45, 90)\n", "ax2.set_xlim(3, -3)\n", "ax2.set_ylim(3, -3)\n", "ax2.set_zlim(-1, 1)\n", - "ax2.set_xlabel('x')\n", - "ax2.set_ylabel('y')\n", - "ax2.set_zlabel('z')\n", + "ax2.set_xlabel(\"x\")\n", + "ax2.set_ylabel(\"y\")\n", + "ax2.set_zlabel(\"z\")\n", "ax2.set_zticks([])\n", "\n", "pose.plot(ax=ax2)\n", - "plt.show()\n" + "plt.show()" ], "metadata": { "collapsed": false, diff --git a/notebooks/demo/static_loader_demo.ipynb b/notebooks/demo/static_loader_demo.ipynb index 81ffd1a..83a259f 100644 --- a/notebooks/demo/static_loader_demo.ipynb +++ b/notebooks/demo/static_loader_demo.ipynb @@ -28,7 +28,7 @@ "execution_count": 18, "outputs": [], "source": [ - "dirname = '/Users/paulpierzchlewicz/PycharmProjects/propose/data/rat7m/s4-d1' # Choose this such that it points to your dataset" + "dirname = \"/Users/paulpierzchlewicz/PycharmProjects/propose/data/rat7m/s4-d1\" # Choose this such that it points to your dataset" ], "metadata": { "collapsed": false, @@ -66,7 +66,7 @@ } ], "source": [ - "for i in dataloaders['train']:\n", + "for i in dataloaders[\"train\"]:\n", " print(i.pose_matrix.shape)\n", " print(i.adjacency_matrix.shape)\n", " print(i.image.shape)\n", diff --git a/notebooks/preprocess_rat7m.ipynb b/notebooks/preprocess_rat7m.ipynb index 52ad0bf..d8e1952 100644 --- a/notebooks/preprocess_rat7m.ipynb +++ b/notebooks/preprocess_rat7m.ipynb @@ -34,9 +34,9 @@ "execution_count": 2, "outputs": [], "source": [ - "dirname = '/Users/paulpierzchlewicz/PycharmProjects/propose/data/rat7m' # Choose this such that it points to your dataset\n", - "data_key = 's4-d1'\n", - "mocap_path = f'{dirname}/mocap/mocap-{data_key}.mat'" + "dirname = \"/Users/paulpierzchlewicz/PycharmProjects/propose/data/rat7m\" # Choose this such that it points to your dataset\n", + "data_key = 
\"s4-d1\"\n", + "mocap_path = f\"{dirname}/mocap/mocap-{data_key}.mat\"" ], "metadata": { "collapsed": false, @@ -159,10 +159,10 @@ "source": [ "from pathlib import Path\n", "\n", - "pose_dir = Path(f'{dirname}/{data_key}/poses')\n", + "pose_dir = Path(f\"{dirname}/{data_key}/poses\")\n", "pose_dir.mkdir(parents=True, exist_ok=True)\n", "\n", - "pose_path = pose_dir / f'{data_key}.npy'\n", + "pose_path = pose_dir / f\"{data_key}.npy\"\n", "\n", "mocap.save(pose_path)" ], @@ -179,13 +179,14 @@ "outputs": [], "source": [ "import pickle\n", - "camera_dir = Path(f'{dirname}/{data_key}/cameras')\n", + "\n", + "camera_dir = Path(f\"{dirname}/{data_key}/cameras\")\n", "camera_dir.mkdir(parents=True, exist_ok=True)\n", "\n", - "camera_path = camera_dir / f'{data_key}.pickle'\n", + "camera_path = camera_dir / f\"{data_key}.pickle\"\n", "\n", - "with open(camera_path, 'wb') as f:\n", - " pickle.dump(cameras, f)\n" + "with open(camera_path, \"wb\") as f:\n", + " pickle.dump(cameras, f)" ], "metadata": { "collapsed": false, @@ -209,7 +210,8 @@ ], "source": [ "from pathlib import PurePath\n", - "PurePath('/Users/paulpierzchlewicz/PycharmProjects/propose/data/rat7m').name" + "\n", + "PurePath(\"/Users/paulpierzchlewicz/PycharmProjects/propose/data/rat7m\").name" ], "metadata": { "collapsed": false, diff --git a/propose/cameras/Camera.py b/propose/cameras/Camera.py index f7327ab..9fbef3b 100644 --- a/propose/cameras/Camera.py +++ b/propose/cameras/Camera.py @@ -1,8 +1,8 @@ +from typing import Optional + import numpy as np import numpy.typing as npt -from typing import Optional - Point2D = npt.NDArray[float] Point3D = npt.NDArray[float] diff --git a/propose/datasets/graph_transforms.py b/propose/datasets/graph_transforms.py index 85fa1dc..60ee276 100644 --- a/propose/datasets/graph_transforms.py +++ b/propose/datasets/graph_transforms.py @@ -1,9 +1,8 @@ import numpy as np import torch import torch.distributions as D - -from torch_geometric.loader.dataloader import Collater from torch_geometric.data import HeteroData +from torch_geometric.loader.dataloader import Collater class ScaleGraphPose(object): diff --git a/propose/datasets/human36m/Human36mDataset.py b/propose/datasets/human36m/Human36mDataset.py index 9deb1d0..ed2fe13 100644 --- a/propose/datasets/human36m/Human36mDataset.py +++ b/propose/datasets/human36m/Human36mDataset.py @@ -1,20 +1,16 @@ import pickle - -import numpy as np - from pathlib import Path +import numpy as np +import torch +import torch.distributions as D from torch.utils.data import Dataset -from propose.poses.human36m import Human36mPose - from torch_geometric.data import HeteroData from torch_geometric.loader.dataloader import Collater - -import torch -import torch.distributions as D - from tqdm import tqdm +from propose.poses.human36m import Human36mPose + class Human36mDataset(Dataset): """ @@ -172,7 +168,7 @@ def __init__( for p in occlusion_fractions: mask = ~self.occlusions[i] - mask = np.insert(mask, 9, False) + mask = np.insert(mask, 8, False) mask[: int(p * context_edges.shape[-1])] = 0 @@ -182,7 +178,7 @@ def __init__( if mpii: mask = ~self.occlusions[i] - mask = np.insert(mask, 9, False) + mask = np.insert(mask, 8, False) rand_idx = np.random.choice( np.arange(0, len(mask)), int(len(mask) * p), replace=False ) diff --git a/propose/datasets/human36m/loaders.py b/propose/datasets/human36m/loaders.py index bcececf..2442ee4 100644 --- a/propose/datasets/human36m/loaders.py +++ b/propose/datasets/human36m/loaders.py @@ -1,14 +1,14 @@ import os -import cdflib -import numpy as 
np import pickle - -from xml.dom import minidom from pathlib import Path from typing import Union +from xml.dom import minidom + +import cdflib +import numpy as np -from propose.poses.utils import load_data_ids from propose.cameras import Camera +from propose.poses.utils import load_data_ids PathType = Union[str, Path] diff --git a/propose/datasets/human36m/preprocess.py b/propose/datasets/human36m/preprocess.py index 838fff6..7cfccd3 100644 --- a/propose/datasets/human36m/preprocess.py +++ b/propose/datasets/human36m/preprocess.py @@ -1,12 +1,12 @@ import os -import cdflib -import numpy as np import pickle - from pathlib import Path from typing import Union -from propose.datasets.human36m.loaders import load_poses, load_cameras +import cdflib +import numpy as np + +from propose.datasets.human36m.loaders import load_cameras, load_poses from propose.poses.human36m import MPII_2_H36M PathType = Union[str, Path] diff --git a/propose/datasets/rat7m/Rat7mDataset.py b/propose/datasets/rat7m/Rat7mDataset.py index 0393490..6e62612 100644 --- a/propose/datasets/rat7m/Rat7mDataset.py +++ b/propose/datasets/rat7m/Rat7mDataset.py @@ -1,9 +1,9 @@ import os - import pickle -import imageio +import imageio from neuralpredictors.data.datasets.base import TransformDataset + from propose.poses.rat7m import Rat7mPose CHUNK_SIZE = 3500 diff --git a/propose/datasets/rat7m/loaders.py b/propose/datasets/rat7m/loaders.py index c607495..8f064be 100644 --- a/propose/datasets/rat7m/loaders.py +++ b/propose/datasets/rat7m/loaders.py @@ -1,19 +1,16 @@ -from propose.cameras import Camera -from propose.poses import Rat7mPose -from propose.datasets.rat7m import Rat7mDataset -import propose.datasets.rat7m.transforms as tr - -from neuralpredictors.data.transforms import ScaleInputs, ToTensor - from collections import namedtuple -import scipy.io as sio - import numpy as np - +import scipy.io as sio +from neuralpredictors.data.transforms import ScaleInputs, ToTensor from torch.utils.data import DataLoader from torch.utils.data.sampler import SubsetRandomSampler +import propose.datasets.rat7m.transforms as tr +from propose.cameras import Camera +from propose.datasets.rat7m import Rat7mDataset +from propose.poses import Rat7mPose + TemporalSplit = namedtuple("TemporalSplit", ["train", "validation", "test"]) diff --git a/propose/datasets/rat7m/transforms.py b/propose/datasets/rat7m/transforms.py index 37ab510..0c0d7a3 100644 --- a/propose/datasets/rat7m/transforms.py +++ b/propose/datasets/rat7m/transforms.py @@ -1,12 +1,11 @@ -import propose.preprocessing.rat7m as pp -from propose.poses.rat7m import Rat7mPose - from collections import namedtuple -from torch_geometric.data import HeteroData - import torch import torch.nn.functional as F +from torch_geometric.data import HeteroData + +import propose.preprocessing.rat7m as pp +from propose.poses.rat7m import Rat7mPose class ScalePose(object): diff --git a/propose/datasets/toy/Pendulum.py b/propose/datasets/toy/Pendulum.py index 7bbeb8e..4c7c4af 100644 --- a/propose/datasets/toy/Pendulum.py +++ b/propose/datasets/toy/Pendulum.py @@ -1,12 +1,11 @@ +from collections import namedtuple +from itertools import combinations + import brax import torch +from torch.utils.data import Dataset from torch_geometric.data import HeteroData - from tqdm import tqdm -from torch.utils.data import Dataset -from collections import namedtuple - -from itertools import combinations from propose.training.utils import get_x_graph diff --git a/propose/datasets/toy/Point.py b/propose/datasets/toy/Point.py 
index da2fb0a..d6070f4 100644 --- a/propose/datasets/toy/Point.py +++ b/propose/datasets/toy/Point.py @@ -1,10 +1,9 @@ +from itertools import combinations + import torch import torch.distributions as D - from torch.utils.data.dataset import Dataset from torch_geometric.data import HeteroData -from itertools import combinations - from torch_geometric.loader.dataloader import Collater diff --git a/propose/evaluation/mpjpe.py b/propose/evaluation/mpjpe.py index 6f52000..7447521 100644 --- a/propose/evaluation/mpjpe.py +++ b/propose/evaluation/mpjpe.py @@ -1,5 +1,5 @@ -import torch import numpy as np +import torch def mpjpe(pred, gt, dim=None, mean=True): diff --git a/propose/models/detectors/hrnet/config/default.py b/propose/models/detectors/hrnet/config/default.py index 36aca27..1aad5a4 100644 --- a/propose/models/detectors/hrnet/config/default.py +++ b/propose/models/detectors/hrnet/config/default.py @@ -4,14 +4,10 @@ # Written by Bin Xiao (Bin.Xiao@microsoft.com) # ------------------------------------------------------------------------------ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - +from __future__ import absolute_import, division, print_function from yacs.config import CfgNode as CN - _C = CN() _C.OUTPUT_DIR = "" diff --git a/propose/models/detectors/hrnet/hrnet.py b/propose/models/detectors/hrnet/hrnet.py index 0def771..2ba7339 100644 --- a/propose/models/detectors/hrnet/hrnet.py +++ b/propose/models/detectors/hrnet/hrnet.py @@ -1,16 +1,16 @@ -import torch -import torch.backends.cudnn as cudnn - -from collections import OrderedDict - import os - -from .models.pose_hrnet import PoseHighResolutionNet -from .config import config +from collections import OrderedDict import numpy as np - +import torch import wandb +from tqdm import tqdm + +from propose.poses.human36m import MPIIPose + +from .config import config +from .models.pose_hrnet import PoseHighResolutionNet +from .utils import crop_image_to_human class HRNet(PoseHighResolutionNet): @@ -114,3 +114,25 @@ def pose_estimate(self, input: torch.Tensor) -> np.array: preds = coords.copy() * 4 return preds, maxvals + + @classmethod + def preprocess( + cls, images: torch.Tensor, detector: torch.nn.Module = None + ) -> torch.Tensor: + if detector is None: + detector = torch.hub.load("ultralytics/yolov5", "yolov5s", pretrained=True) + + detector.eval() + + if len(images.shape) == 3: + images = images.unsqueeze(0) + + cropped_images = [] + for image in images: + cropped_image = crop_image_to_human(image, detector) + cropped_images.append(torch.Tensor(cropped_image)) + + cropped_images = torch.stack(cropped_images) + pred_image = cropped_images.permute(0, 3, 1, 2) + + return pred_image diff --git a/propose/models/detectors/hrnet/models/pose_hrnet.py b/propose/models/detectors/hrnet/models/pose_hrnet.py index 237db9a..ff3dfed 100644 --- a/propose/models/detectors/hrnet/models/pose_hrnet.py +++ b/propose/models/detectors/hrnet/models/pose_hrnet.py @@ -4,17 +4,14 @@ # Written by Bin Xiao (Bin.Xiao@microsoft.com) # ------------------------------------------------------------------------------ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function -import os import logging +import os import torch import torch.nn as nn - BN_MOMENTUM = 0.1 logger = logging.getLogger(__name__) diff --git a/propose/models/detectors/hrnet/utils.py 
b/propose/models/detectors/hrnet/utils.py new file mode 100644 index 0000000..c2358fc --- /dev/null +++ b/propose/models/detectors/hrnet/utils.py @@ -0,0 +1,62 @@ +import torch +from neuralpredictors.data.transforms import rescale +from torchvision.transforms import Pad + + +def crop_image_to_human(input_image, detector): + if isinstance(input_image, torch.Tensor): + input_image = input_image.numpy() + + detections = detector(input_image) + detections = ( + detections.pandas() + .xyxy[0][detections.pandas().xyxy[0].name == "person"] + .reset_index() + ) + bbox = detections.iloc[0] + + xy = (bbox["xmin"], bbox["ymax"]) + width = bbox["xmax"] - bbox["xmin"] + height = bbox["ymax"] - bbox["ymin"] + + center = (xy[0] + width / 2, xy[1] - height / 2) + + side = max([width, height]) + 10 + + crop_size = [ + int(center[0] - side / 2), + int(center[0] + side / 2), + int(center[1] - side / 2), + int(center[1] + side / 2), + ] + for i in range(4): + crop_size[i] = max([crop_size[i], 0]) + + cropped_image = input_image[ + crop_size[2] : crop_size[3], crop_size[0] : crop_size[1] + ] + + padder = Pad( + ( + int((max(cropped_image.shape) - cropped_image.shape[0]) / 2), + int((max(cropped_image.shape) - cropped_image.shape[1]) / 2), + ) + ) + cropped_image = padder(torch.Tensor(cropped_image)).numpy() + cropped_image = cropped_image / 255 + + cropped_image = rescale( + cropped_image, 256 / cropped_image.shape[0], channel_axis=-1 + ) + cropped_image = cropped_image[:256, :256] + padder = Pad( + ( + 256 - cropped_image.shape[0], + 256 - cropped_image.shape[1], + ) + ) + + cropped_image = padder(torch.Tensor(cropped_image)).numpy() + cropped_image = cropped_image[:256, :256] + + return cropped_image diff --git a/propose/models/distributions/StandardNormal.py b/propose/models/distributions/StandardNormal.py index 4865fc5..01d0d6f 100644 --- a/propose/models/distributions/StandardNormal.py +++ b/propose/models/distributions/StandardNormal.py @@ -1,10 +1,8 @@ -from nflows.distributions.base import Distribution -from nflows.utils import torchutils import nflows.utils.typechecks as check - -import torch - import numpy as np +import torch +from nflows.distributions.base import Distribution +from nflows.utils import torchutils class StandardNormal(Distribution): diff --git a/propose/models/flows/CondGraphFlow.py b/propose/models/flows/CondGraphFlow.py index b5f483b..db9ec7d 100644 --- a/propose/models/flows/CondGraphFlow.py +++ b/propose/models/flows/CondGraphFlow.py @@ -1,18 +1,18 @@ import torch import wandb +from torch_geometric.data import HeteroData +import propose.poses +from propose.models.distributions import StandardNormal from propose.models.flows.GraphFlow import GraphFlow from propose.models.nn.CondGNN import CondGNN from propose.models.nn.embedding import embeddings - -from torch_geometric.data import HeteroData - from propose.models.transforms.transform import ( + GraphActNorm, GraphAffineCouplingTransform, GraphCompositeTransform, - GraphActNorm, ) -from propose.models.distributions import StandardNormal +from propose.poses.human36m import Human36mPose, MPIIPose class CondGraphFlow(GraphFlow): @@ -25,6 +25,7 @@ def __init__( embedding_net=None, relations=None, use_attention=False, + root_features=3, # mask_idx=[0, 2, 5, 8, 10, 12, 15] ): """ @@ -34,7 +35,9 @@ def __init__( :param context_features: Number of features in the context after embedding. :param hidden_features: Number of features in the hidden layers. :param embedding_net: (optional) Network to embed the context. 
default: nn.Identity - :param gcn_type: (optional) Type of GCN to use. default: slow + :param relations: (optional) List of relations to use. default: None + :param use_attention: (optional) Whether to use attention. default: False + :param root_features: (optional) Number of features in the root node. default: 3 """ def create_net(in_features, out_features): @@ -45,6 +48,7 @@ def create_net(in_features, out_features): hidden_features=hidden_features, relations=relations, use_attention=use_attention, + root_features=root_features, ) coupling_constructor = GraphAffineCouplingTransform @@ -126,3 +130,11 @@ def set_device(self) -> bool: return True return False + + @classmethod + def preprocess(cls, pose_2d: propose.poses.Human36mPose) -> HeteroData: + pose_2d.pose_matrix = pose_2d.pose_matrix * 0.0139 + pose_2d.pose_matrix = pose_2d.pose_matrix - pose_2d.pose_matrix[:, 0] + pose_2d.pose_matrix[..., 1] = -pose_2d.pose_matrix[..., 1] + + return Human36mPose().conditional_graph(pose_2d) diff --git a/propose/models/flows/Flow.py b/propose/models/flows/Flow.py index 1bc30e2..b5327e2 100644 --- a/propose/models/flows/Flow.py +++ b/propose/models/flows/Flow.py @@ -1,14 +1,11 @@ import numpy as np import torch +from nflows.distributions import StandardNormal +from nflows.flows import Flow +from nflows.transforms import AffineCouplingTransform, BatchNorm, CompositeTransform from torch import nn from torch.nn import functional as F -from nflows.flows import Flow - -from nflows.transforms import AffineCouplingTransform, CompositeTransform, BatchNorm - -from nflows.distributions import StandardNormal - class NF(Flow): def __init__( diff --git a/propose/models/flows/GraphFlow.py b/propose/models/flows/GraphFlow.py index c11a8a2..23d189e 100644 --- a/propose/models/flows/GraphFlow.py +++ b/propose/models/flows/GraphFlow.py @@ -1,11 +1,9 @@ """Basic definitions for the flows module.""" import nflows.utils.typechecks as check - +import torch from nflows.flows.base import Flow - from torch_geometric.data import HeteroData -import torch class GraphFlow(Flow): diff --git a/propose/models/layers/CondGCN.py b/propose/models/layers/CondGCN.py index 5db5584..15b9d87 100644 --- a/propose/models/layers/CondGCN.py +++ b/propose/models/layers/CondGCN.py @@ -1,12 +1,10 @@ +import itertools +from typing import Literal + import torch import torch.nn as nn - import torch_sparse as ts -from typing import Literal - -import itertools - class CondGCN(nn.Module): """ diff --git a/propose/models/nn/CondGNN.py b/propose/models/nn/CondGNN.py index e780146..77e312a 100644 --- a/propose/models/nn/CondGNN.py +++ b/propose/models/nn/CondGNN.py @@ -1,12 +1,11 @@ +from typing import Union + import torch import torch.nn as nn - from torch_geometric.data import HeteroData from propose.models.layers.CondGCN import CondGCN -from typing import Union - class CondGNN(nn.Module): """ diff --git a/propose/models/nn/embedding.py b/propose/models/nn/embedding.py index 997cc94..013ed6f 100644 --- a/propose/models/nn/embedding.py +++ b/propose/models/nn/embedding.py @@ -1,9 +1,8 @@ +from typing import Optional, Union + import torch import torch.nn as nn from torch import Tensor - -from typing import Optional, Union - from torch_geometric.nn.dense import DenseSAGEConv diff --git a/propose/models/transforms/transform.py b/propose/models/transforms/transform.py index 5fadc4f..8be7081 100644 --- a/propose/models/transforms/transform.py +++ b/propose/models/transforms/transform.py @@ -1,12 +1,10 @@ +import nflows.utils.typechecks as check import torch 
-from torch import nn - -from torch_geometric.data import HeteroData - +from nflows.transforms.base import CompositeTransform, Transform from nflows.transforms.coupling import CouplingTransform from nflows.utils import torchutils -import nflows.utils.typechecks as check -from nflows.transforms.base import Transform, CompositeTransform +from torch import nn +from torch_geometric.data import HeteroData class GraphAffineCouplingTransform(CouplingTransform): diff --git a/propose/poses/__init__.py b/propose/poses/__init__.py index 948821b..ec880e1 100644 --- a/propose/poses/__init__.py +++ b/propose/poses/__init__.py @@ -1,3 +1,3 @@ from .base import BasePose, YamlPose -from .rat7m import Rat7mPose from .human36m import Human36mPose +from .rat7m import Rat7mPose diff --git a/propose/poses/base.py b/propose/poses/base.py index b770474..0bf9cb7 100644 --- a/propose/poses/base.py +++ b/propose/poses/base.py @@ -1,15 +1,14 @@ -import numpy as np -import matplotlib.pyplot as plt - -from .utils import yaml_pose_loader - from abc import ABC, abstractmethod -from propose.cameras import Camera - +import matplotlib.pyplot as plt +import numpy as np import torch from torch_geometric.data import HeteroData +from propose.cameras import Camera + +from .utils import yaml_pose_loader + class BasePose(ABC): """ @@ -139,7 +138,10 @@ def _plot(self, ax, **kwargs): for j in range(edge_vals.shape[-1]) ] - def plot(self, ax, plot_type="groups", **kwargs): + def plot(self, ax=None, plot_type="groups", **kwargs): + if ax is None: + ax = plt.gca() + if plot_type == "groups": return self._plot_groups(ax, **kwargs) diff --git a/propose/poses/human36m.py b/propose/poses/human36m.py index 782e8de..8aea09c 100644 --- a/propose/poses/human36m.py +++ b/propose/poses/human36m.py @@ -1,11 +1,10 @@ -from propose.poses.base import YamlPose - import os import numpy as np - -from torch_geometric.data import HeteroData import torch +from torch_geometric.data import HeteroData + +from propose.poses.base import YamlPose MPII_2_H36M = [ 6, @@ -32,10 +31,13 @@ class Human36mPose(YamlPose): Pose Class for the Human3.6M dataset. """ - def __init__(self, pose_matrix, **kwargs): + def __init__(self, pose_matrix=None, **kwargs): dirname = os.path.dirname(__file__) path = os.path.join(dirname, "metadata/human36m.yaml") + if pose_matrix is None: + pose_matrix = np.zeros((1, 17, 3)) + super().__init__(pose_matrix, path) def conditional_graph(self, context: "BasePose") -> HeteroData: @@ -100,11 +102,12 @@ def to_human36m(self): Convert the pose to the Human3.6M format. :return: A Human3.6M pose. 
""" - pose_matrix = self.pose_matrix.copy() pose_matrix = pose_matrix[:, MPII_2_H36M] pose_matrix = np.insert(pose_matrix, 9, 0, axis=1) + pose = Human36mPose(pose_matrix) pose.occluded_markers = self.occluded_markers[0, MPII_2_H36M, 0] pose.occluded_markers = np.insert(pose.occluded_markers, 9, True, axis=0) + return pose diff --git a/propose/poses/rat7m.py b/propose/poses/rat7m.py index d307a96..1efd591 100644 --- a/propose/poses/rat7m.py +++ b/propose/poses/rat7m.py @@ -1,7 +1,7 @@ -from propose.poses.base import BasePose - import numpy as np +from propose.poses.base import BasePose + class Rat7mPose(BasePose): """ diff --git a/propose/poses/utils.py b/propose/poses/utils.py index 7516d9d..0c40087 100644 --- a/propose/poses/utils.py +++ b/propose/poses/utils.py @@ -1,6 +1,7 @@ -import yaml import functools +import yaml + def load_data_ids(path: str) -> list: """ diff --git a/propose/preprocessing/image.py b/propose/preprocessing/image.py index a8b235c..6b0cb5c 100644 --- a/propose/preprocessing/image.py +++ b/propose/preprocessing/image.py @@ -1,7 +1,7 @@ -from propose.poses import BasePose - import numpy.typing as npt +from propose.poses import BasePose + Image = npt.NDArray[float] diff --git a/propose/preprocessing/masks.py b/propose/preprocessing/masks.py index be65ec8..aee6409 100644 --- a/propose/preprocessing/masks.py +++ b/propose/preprocessing/masks.py @@ -1,9 +1,9 @@ -from propose.cameras import Camera -from propose.poses import BasePose - import numpy as np import numpy.typing as npt +from propose.cameras import Camera +from propose.poses import BasePose + Mask = npt.NDArray[bool] # bool array, where True means that frame is to be masked diff --git a/propose/preprocessing/pose.py b/propose/preprocessing/pose.py index 2e7c350..c80bb99 100644 --- a/propose/preprocessing/pose.py +++ b/propose/preprocessing/pose.py @@ -1,8 +1,8 @@ -from propose.poses import BasePose -from propose.cameras import Camera - import numpy as np +from propose.cameras import Camera +from propose.poses import BasePose + def normalize_std(pose: BasePose) -> BasePose: """ diff --git a/propose/preprocessing/rat7m/__init__.py b/propose/preprocessing/rat7m/__init__.py index de34e47..cd3c33b 100644 --- a/propose/preprocessing/rat7m/__init__.py +++ b/propose/preprocessing/rat7m/__init__.py @@ -1,5 +1,4 @@ -from propose.preprocessing.rat7m.masks import * +from propose.preprocessing import * from propose.preprocessing.rat7m.format import * +from propose.preprocessing.rat7m.masks import * from propose.preprocessing.rat7m.pose import * - -from propose.preprocessing import * diff --git a/propose/preprocessing/rat7m/format.py b/propose/preprocessing/rat7m/format.py index e39d9f1..a729310 100644 --- a/propose/preprocessing/rat7m/format.py +++ b/propose/preprocessing/rat7m/format.py @@ -1,8 +1,9 @@ +import os import re +from pathlib import Path + import ffmpeg from tqdm import tqdm -import os -from pathlib import Path CHUNK_SIZE = 3500 diff --git a/propose/preprocessing/rat7m/masks.py b/propose/preprocessing/rat7m/masks.py index 08f1532..8208f78 100644 --- a/propose/preprocessing/rat7m/masks.py +++ b/propose/preprocessing/rat7m/masks.py @@ -1,8 +1,8 @@ -from propose.poses import Rat7mPose - import numpy as np import numpy.typing as npt +from propose.poses import Rat7mPose + Mask = npt.NDArray[bool] # bool array, where True means that frame is to be masked diff --git a/propose/preprocessing/rat7m/pose.py b/propose/preprocessing/rat7m/pose.py index 65d3e70..04b24b8 100644 --- a/propose/preprocessing/rat7m/pose.py +++ 
b/propose/preprocessing/rat7m/pose.py @@ -1,7 +1,7 @@ -from propose.poses import Rat7mPose - import numpy as np +from propose.poses import Rat7mPose + def switch_arms_elbows(pose: Rat7mPose) -> Rat7mPose: """ diff --git a/propose/training/__init__.py b/propose/training/__init__.py index 1248a21..a8618c8 100644 --- a/propose/training/__init__.py +++ b/propose/training/__init__.py @@ -1,3 +1,3 @@ from .prior import prior_trainer -from .supervised import supervised_trainer from .semi_supervised import semi_supervised_trainer +from .supervised import supervised_trainer diff --git a/propose/training/prior.py b/propose/training/prior.py index 52268dd..51529a5 100644 --- a/propose/training/prior.py +++ b/propose/training/prior.py @@ -1,20 +1,15 @@ +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sns import torch import torch.nn as nn - -from tqdm import tqdm - -from .utils import get_x_graph - from torch_geometric.loader.dataloader import Collater +from tqdm import tqdm -import matplotlib.pyplot as plt from propose.poses.human36m import Human36mPose -import seaborn as sns -import numpy as np -import pandas as pd - -import torch.nn as nn +from .utils import get_x_graph def prior_trainer( diff --git a/propose/training/semi_supervised.py b/propose/training/semi_supervised.py index 6c54a31..ad0508b 100644 --- a/propose/training/semi_supervised.py +++ b/propose/training/semi_supervised.py @@ -1,12 +1,11 @@ +import warnings + import torch import torch.nn.functional as F - from tqdm import tqdm from .utils import get_x_graph -import warnings - def semi_supervised_trainer(dataloader, flow, optimizer=None, epochs=100): """ diff --git a/propose/training/supervised.py b/propose/training/supervised.py index 9b6aed6..98a186e 100644 --- a/propose/training/supervised.py +++ b/propose/training/supervised.py @@ -1,9 +1,7 @@ import torch import torch.nn as nn - -from tqdm import tqdm - from torch_geometric.loader.dataloader import Collater +from tqdm import tqdm from propose.evaluation.mpjpe import mpjpe @@ -66,7 +64,7 @@ def supervised_trainer( prior_loss = loss[n_posterior:].mean() posterior_loss = loss[:n_posterior].mean() - mse_mode_pose = torch.Tensor([0]) + mse_mode_pose = torch.Tensor([0]).to(device) if use_mode: scaling = 0.0036 # the std with which the data was normalized diff --git a/propose/utils/reproducibility.py b/propose/utils/reproducibility.py index 9a822bc..20b3475 100644 --- a/propose/utils/reproducibility.py +++ b/propose/utils/reproducibility.py @@ -1,6 +1,10 @@ +import random +import subprocess +from warnings import warn + import numpy as np +import pkg_resources import torch -import random def set_random_seed(seed): @@ -20,3 +24,39 @@ def set_random_seed(seed): # Make GPU operations deterministic torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False + + +def get_commit_hash() -> str: + """ + Returns the current commit hash shortened to 7 characters. + """ + + return ( + subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]) + .decode("utf-8") + .strip() + ) + + +def check_uncommitted_changes() -> bool: + """ + Checks if there are uncommitted changes. + """ + uncommitted = ( + subprocess.check_output(["git", "diff", "--name-only"]).decode("utf-8").strip() + != "" + ) + + if uncommitted: + warn( + "There are uncommitted changes in the repository. The current logged commit hash will not be correct."
+ ) + + return uncommitted + + +def get_package_version(package_name: str) -> str: + """ + Returns the version of the package. + """ + return pkg_resources.get_distribution(package_name).version diff --git a/setup.py b/setup.py index 7868aca..5af7c3b 100644 --- a/setup.py +++ b/setup.py @@ -1,12 +1,30 @@ #!/usr/bin/env python -from setuptools import setup, find_packages +import subprocess from os import path +from setuptools import find_packages, setup + here = path.abspath(path.dirname(__file__)) +version = "0.0" + + +def _get_version_hash(): + """Talk to git and find out the tag/hash of our latest commit""" + try: + return ( + subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]) + .decode("utf-8") + .strip() + ) + except subprocess.CalledProcessError: + print("Couldn't run git to get a version number for setup.py") + return version + + setup( name="propose", - version="0.0", + version=_get_version_hash(), description="Probabilistic Pose Estimation", author="Paweł A. Pierzchlewicz", author_email="ppierzc@gmail.com", diff --git a/tests/cameras_test.py b/tests/cameras_test.py index 63e9627..2794391 100644 --- a/tests/cameras_test.py +++ b/tests/cameras_test.py @@ -1,6 +1,7 @@ -from propose.cameras import Camera import numpy as np +from propose.cameras import Camera + def set_global_vars(): np.random.seed(1) diff --git a/tests/datasets/human36m/Human36mDataset_test.py b/tests/datasets/human36m/Human36mDataset_test.py index d8b922b..2b2264e 100644 --- a/tests/datasets/human36m/Human36mDataset_test.py +++ b/tests/datasets/human36m/Human36mDataset_test.py @@ -1,11 +1,10 @@ -from propose.datasets.human36m.Human36mDataset import Human36mDataset +from unittest.mock import patch +from neuralpredictors.data.transforms import ToTensor from torch_geometric.loader import DataLoader -from unittest.mock import patch - import propose.datasets.rat7m.transforms as tr -from neuralpredictors.data.transforms import ToTensor +from propose.datasets.human36m.Human36mDataset import Human36mDataset # @patch('propose.datasets.human36m.Human36mDataset.Human36mPose') # def test_works_with_data_loader(): diff --git a/tests/datasets/human36m/human36m_loaders_test.py b/tests/datasets/human36m/human36m_loaders_test.py index 5547c50..8df8385 100644 --- a/tests/datasets/human36m/human36m_loaders_test.py +++ b/tests/datasets/human36m/human36m_loaders_test.py @@ -1,11 +1,10 @@ -from propose.datasets.human36m.loaders import load_poses # , pickle_poses - -from unittest.mock import patch, MagicMock +from pathlib import Path from unittest import TestCase +from unittest.mock import MagicMock, patch import numpy as np -from pathlib import Path +from propose.datasets.human36m.loaders import load_poses # , pickle_poses class TestHuman36MPoseLoader(TestCase): diff --git a/tests/datasets/rat7m/Rat7mDataset_test.py b/tests/datasets/rat7m/Rat7mDataset_test.py index 9ff9b4e..d8a8223 100644 --- a/tests/datasets/rat7m/Rat7mDataset_test.py +++ b/tests/datasets/rat7m/Rat7mDataset_test.py @@ -1,11 +1,10 @@ -from propose.datasets.rat7m.Rat7mDataset import Rat7mDataset -from propose.poses.rat7m import Rat7mPose +from unittest.mock import MagicMock, call, patch +import numpy as np from neuralpredictors.data.datasets.base import TransformDataset -from unittest.mock import MagicMock, patch, call - -import numpy as np +from propose.datasets.rat7m.Rat7mDataset import Rat7mDataset +from propose.poses.rat7m import Rat7mPose @patch("propose.datasets.rat7m.Rat7mDataset.Rat7mPose") diff --git a/tests/datasets/rat7m/rat7m_loaders_test.py 
b/tests/datasets/rat7m/rat7m_loaders_test.py index 11d6889..1a8ba95 100644 --- a/tests/datasets/rat7m/rat7m_loaders_test.py +++ b/tests/datasets/rat7m/rat7m_loaders_test.py @@ -1,20 +1,18 @@ +from unittest.mock import MagicMock, patch + +import numpy as np +from neuralpredictors.data.transforms import ScaleInputs, ToTensor +from torch.utils.data import DataLoader + +import propose.datasets.rat7m.transforms as tr from propose.cameras import Camera -from propose.poses import Rat7mPose from propose.datasets.rat7m.loaders import ( - load_mocap, load_cameras, - temporal_split_dataset, + load_mocap, static_loader, + temporal_split_dataset, ) - -import propose.datasets.rat7m.transforms as tr -from neuralpredictors.data.transforms import ScaleInputs, ToTensor - -from unittest.mock import MagicMock, patch - -from torch.utils.data import DataLoader - -import numpy as np +from propose.poses import Rat7mPose path = "./tests/mock/data/mocap-mock.mat" diff --git a/tests/datasets/rat7m/transforms_test.py b/tests/datasets/rat7m/transforms_test.py index 294c25e..01fc125 100644 --- a/tests/datasets/rat7m/transforms_test.py +++ b/tests/datasets/rat7m/transforms_test.py @@ -1,12 +1,11 @@ -from propose.poses import Rat7mPose -import propose.datasets.rat7m.transforms as tr -from tests.mock.cameras import create_mock_camera - -from unittest.mock import MagicMock, patch - from collections import namedtuple +from unittest.mock import MagicMock, patch import numpy as np +from tests.mock.cameras import create_mock_camera + +import propose.datasets.rat7m.transforms as tr +from propose.poses import Rat7mPose @patch("propose.datasets.rat7m.transforms.pp") diff --git a/tests/evaluation/mpjpe_test.py b/tests/evaluation/mpjpe_test.py index d824a4d..0e60309 100644 --- a/tests/evaluation/mpjpe_test.py +++ b/tests/evaluation/mpjpe_test.py @@ -1,10 +1,10 @@ -from propose.evaluation.mpjpe import mpjpe, pa_mpjpe -from propose.utils.reproducibility import set_random_seed +from unittest import TestCase -import torch import numpy.testing as npt +import torch -from unittest import TestCase +from propose.evaluation.mpjpe import mpjpe, pa_mpjpe +from propose.utils.reproducibility import set_random_seed class MPJPETests(TestCase): diff --git a/tests/evaluation/pck_test.py b/tests/evaluation/pck_test.py index 12d4c15..2e7b437 100644 --- a/tests/evaluation/pck_test.py +++ b/tests/evaluation/pck_test.py @@ -1,7 +1,7 @@ import unittest -import torch import numpy.testing as npt +import torch from propose.evaluation.pck import pck from propose.utils.reproducibility import set_random_seed diff --git a/tests/mock/cameras.py b/tests/mock/cameras.py index 3d76323..326429d 100644 --- a/tests/mock/cameras.py +++ b/tests/mock/cameras.py @@ -1,4 +1,5 @@ import numpy as np + from propose.cameras import Camera diff --git a/tests/models/detectors/hrnet_test.py b/tests/models/detectors/hrnet_test.py index c1be4ea..a5f4c09 100644 --- a/tests/models/detectors/hrnet_test.py +++ b/tests/models/detectors/hrnet_test.py @@ -1,9 +1,8 @@ import unittest +from unittest.mock import MagicMock, patch from propose.models.detectors import HRNet -from unittest.mock import MagicMock, patch - class HRNetTests(unittest.TestCase): @patch("propose.models.detectors.hrnet.hrnet.wandb") diff --git a/tests/models/distributions/StandardNormal_test.py b/tests/models/distributions/StandardNormal_test.py index e9ac065..a692817 100644 --- a/tests/models/distributions/StandardNormal_test.py +++ b/tests/models/distributions/StandardNormal_test.py @@ -1,7 +1,7 @@ -from 
propose.models.distributions.StandardNormal import StandardNormal - -import numpy.testing as npt import numpy as np +import numpy.testing as npt + +from propose.models.distributions.StandardNormal import StandardNormal def test_StandardNormal_smoke(): diff --git a/tests/models/flows/GraphFlow_test.py b/tests/models/flows/GraphFlow_test.py index cbd79ff..4331474 100644 --- a/tests/models/flows/GraphFlow_test.py +++ b/tests/models/flows/GraphFlow_test.py @@ -1,19 +1,15 @@ from unittest import TestCase - from unittest.mock import MagicMock -from propose.models.flows.GraphFlow import GraphFlow -from propose.models.distributions import StandardNormal -from propose.models.transforms.transform import GraphCompositeTransform -from propose.models.nn.embedding import embeddings - - -from propose.datasets.toy.Point import SinglePointDataset, SinglePointPriorDataset - -from torch_geometric.loader import DataLoader - import torch import torch.testing as tt +from torch_geometric.loader import DataLoader + +from propose.datasets.toy.Point import SinglePointDataset, SinglePointPriorDataset +from propose.models.distributions import StandardNormal +from propose.models.flows.GraphFlow import GraphFlow +from propose.models.nn.embedding import embeddings +from propose.models.transforms.transform import GraphCompositeTransform class TestGraphFlow(TestCase): diff --git a/tests/models/layers/CondGCN_test.py b/tests/models/layers/CondGCN_test.py index 7eddb25..d1850f2 100644 --- a/tests/models/layers/CondGCN_test.py +++ b/tests/models/layers/CondGCN_test.py @@ -1,11 +1,10 @@ -from propose.models.layers.CondGCN import CondGCN +import types +from unittest import TestCase import torch import torch.testing as tt -import types - -from unittest import TestCase +from propose.models.layers.CondGCN import CondGCN class CondGCNTests(TestCase): diff --git a/tests/models/nn/CondGNN_test.py b/tests/models/nn/CondGNN_test.py index 67c8752..0390467 100644 --- a/tests/models/nn/CondGNN_test.py +++ b/tests/models/nn/CondGNN_test.py @@ -1,13 +1,11 @@ -from propose.models.nn.CondGNN import CondGNN -from propose.models.layers.CondGCN import CondGCN - -from torch_geometric.data import HeteroData - -import torch.testing as tt +from unittest.mock import MagicMock, call, patch import torch +import torch.testing as tt +from torch_geometric.data import HeteroData -from unittest.mock import MagicMock, patch, call +from propose.models.layers.CondGCN import CondGCN +from propose.models.nn.CondGNN import CondGNN def test_smoke(): diff --git a/tests/models/nn/embedding_test.py b/tests/models/nn/embedding_test.py index 42ee2e6..265e9d7 100644 --- a/tests/models/nn/embedding_test.py +++ b/tests/models/nn/embedding_test.py @@ -1,14 +1,14 @@ +import torch +import torch.testing as tt + from propose.models.nn.embedding import ( Embedding, + JoinEmbedding, LinearEmbedding, SplitEmbedding, - JoinEmbedding, SplitLinearEmbedding, ) -import torch -import torch.testing as tt - def test_smoke(): Embedding() diff --git a/tests/poses/human36_pose_test.py b/tests/poses/human36_pose_test.py index fdac2e5..6fd46e9 100644 --- a/tests/poses/human36_pose_test.py +++ b/tests/poses/human36_pose_test.py @@ -1,7 +1,8 @@ -from propose.poses import Human36mPose +from unittest import TestCase + import numpy as np -from unittest import TestCase +from propose.poses import Human36mPose class Human36mPoseTest(TestCase): diff --git a/tests/poses/rat7m_pose_test.py b/tests/poses/rat7m_pose_test.py index fa0cd20..aa3056a 100644 --- a/tests/poses/rat7m_pose_test.py +++ 
b/tests/poses/rat7m_pose_test.py @@ -1,9 +1,11 @@ -from propose.poses import Rat7mPose, BasePose +from unittest import TestCase +from unittest.mock import Mock, call, patch + import numpy as np -from ..mock import create_mock_camera -from unittest.mock import Mock, patch, call -from unittest import TestCase +from propose.poses import BasePose, Rat7mPose + +from ..mock import create_mock_camera def test_pose_init(): diff --git a/tests/poses/utils_pose_test.py b/tests/poses/utils_pose_test.py index 48519b1..9cc93e8 100644 --- a/tests/poses/utils_pose_test.py +++ b/tests/poses/utils_pose_test.py @@ -1,8 +1,7 @@ -from propose.poses.utils import yaml_pose_loader, load_data_ids - +import os from unittest import TestCase -import os +from propose.poses.utils import load_data_ids, yaml_pose_loader dirname = os.path.dirname(__file__) path = os.path.join(dirname, "../mock/data/mock_pose.yaml") diff --git a/tests/poses/yaml_pose_test.py b/tests/poses/yaml_pose_test.py index 0f4e4ac..1c4a4ff 100644 --- a/tests/poses/yaml_pose_test.py +++ b/tests/poses/yaml_pose_test.py @@ -1,10 +1,10 @@ -from propose.poses import YamlPose +import os +from unittest import TestCase + import numpy as np import numpy.testing as npt -from unittest import TestCase - -import os +from propose.poses import YamlPose dirname = os.path.dirname(__file__) path = os.path.join(dirname, "../mock/data/mock_pose.yaml") diff --git a/tests/preprocessing/image_test.py b/tests/preprocessing/image_test.py index cbf4f07..a353a8b 100644 --- a/tests/preprocessing/image_test.py +++ b/tests/preprocessing/image_test.py @@ -1,11 +1,10 @@ -import propose.preprocessing.image as pp - from unittest.mock import MagicMock -from propose.poses import Rat7mPose - import numpy as np +import propose.preprocessing.image as pp +from propose.poses import Rat7mPose + def test_square_crop_to_pose(): np.random.seed(1) diff --git a/tests/preprocessing/mask_test.py b/tests/preprocessing/mask_test.py index de86fc8..ae305aa 100644 --- a/tests/preprocessing/mask_test.py +++ b/tests/preprocessing/mask_test.py @@ -1,9 +1,8 @@ +import numpy as np from tests.mock.cameras import create_mock_camera -from propose.poses import Rat7mPose import propose.preprocessing.rat7m as pp - -import numpy as np +from propose.poses import Rat7mPose def test_mask_nans(): diff --git a/tests/preprocessing/pose_test.py b/tests/preprocessing/pose_test.py index c9d6451..f6dd397 100644 --- a/tests/preprocessing/pose_test.py +++ b/tests/preprocessing/pose_test.py @@ -1,11 +1,9 @@ -from propose.poses import Rat7mPose -import propose.preprocessing as pp - -from tests.mock.cameras import create_mock_camera - import numpy as np - from scipy.spatial.transform import Rotation as R +from tests.mock.cameras import create_mock_camera + +import propose.preprocessing as pp +from propose.poses import Rat7mPose def test_normalize_std(): diff --git a/tests/preprocessing/rat7m/rat7m_format_test.py b/tests/preprocessing/rat7m/rat7m_format_test.py index e954618..7306485 100644 --- a/tests/preprocessing/rat7m/rat7m_format_test.py +++ b/tests/preprocessing/rat7m/rat7m_format_test.py @@ -1,4 +1,5 @@ -from unittest.mock import MagicMock, patch, call +from unittest.mock import MagicMock, call, patch + import propose.preprocessing.rat7m.format as pp diff --git a/tests/preprocessing/rat7m/rat7m_masks_test.py b/tests/preprocessing/rat7m/rat7m_masks_test.py index ccf1e0b..bf68e9b 100644 --- a/tests/preprocessing/rat7m/rat7m_masks_test.py +++ b/tests/preprocessing/rat7m/rat7m_masks_test.py @@ -1,8 +1,8 @@ -from propose.poses 
import Rat7mPose -import propose.preprocessing.rat7m as pp - import numpy as np +import propose.preprocessing.rat7m as pp +from propose.poses import Rat7mPose + def test_mask_marker_failure(): np.random.seed(1) diff --git a/tests/preprocessing/rat7m/rat7m_pose_test.py b/tests/preprocessing/rat7m/rat7m_pose_test.py index 3a64150..b7fe4dc 100644 --- a/tests/preprocessing/rat7m/rat7m_pose_test.py +++ b/tests/preprocessing/rat7m/rat7m_pose_test.py @@ -1,8 +1,8 @@ -from propose.poses import Rat7mPose -import propose.preprocessing.rat7m as pp - import numpy as np +import propose.preprocessing.rat7m as pp +from propose.poses import Rat7mPose + def test_switch_arms_legs(): np.random.seed(1) diff --git a/tests/training/prior_test.py b/tests/training/prior_test.py index 9bedff6..9fa61f7 100644 --- a/tests/training/prior_test.py +++ b/tests/training/prior_test.py @@ -1,9 +1,10 @@ -from propose.models.flows import CondGraphFlow -from propose.training.prior import prior_trainer -from propose.datasets.toy import SinglePointDataset, ThreePointDataset +from unittest.mock import MagicMock, call from torch_geometric.loader import DataLoader -from unittest.mock import MagicMock, call + +from propose.datasets.toy import SinglePointDataset, ThreePointDataset +from propose.models.flows import CondGraphFlow +from propose.training.prior import prior_trainer def test_smoke_single(): diff --git a/tests/training/semi_supervised_test.py b/tests/training/semi_supervised_test.py index b8f3105..785250b 100644 --- a/tests/training/semi_supervised_test.py +++ b/tests/training/semi_supervised_test.py @@ -1,9 +1,10 @@ -from propose.models.flows import CondGraphFlow -from propose.training.semi_supervised import semi_supervised_trainer -from propose.datasets.toy import SinglePointDataset, ThreePointDataset +from unittest.mock import MagicMock, call from torch_geometric.loader import DataLoader -from unittest.mock import MagicMock, call + +from propose.datasets.toy import SinglePointDataset, ThreePointDataset +from propose.models.flows import CondGraphFlow +from propose.training.semi_supervised import semi_supervised_trainer def test_smoke_single(): diff --git a/tests/training/supervised_test.py b/tests/training/supervised_test.py index e8937da..cfb4da5 100644 --- a/tests/training/supervised_test.py +++ b/tests/training/supervised_test.py @@ -1,10 +1,11 @@ -from propose.models.flows import CondGraphFlow -from propose.training.supervised import supervised_trainer -from propose.datasets.toy import SinglePointDataset, ThreePointDataset +from unittest.mock import MagicMock, call -from torch_geometric.loader import DataLoader import torch -from unittest.mock import MagicMock, call +from torch_geometric.loader import DataLoader + +from propose.datasets.toy import SinglePointDataset, ThreePointDataset +from propose.models.flows import CondGraphFlow +from propose.training.supervised import supervised_trainer def test_smoke_single(): diff --git a/tests/training/utils_test.py b/tests/training/utils_test.py index d791255..a7bf723 100644 --- a/tests/training/utils_test.py +++ b/tests/training/utils_test.py @@ -1,8 +1,7 @@ -import torch - import numpy.testing as npt - +import torch from torch_geometric.data import HeteroData + from propose.training.utils import get_x_graph
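
The new `HRNet.preprocess` classmethod (added in `propose/models/detectors/hrnet/hrnet.py` together with `crop_image_to_human` in the new `utils.py`) bundles person detection, square cropping, padding, and rescaling into a single call. Below is a minimal usage sketch, assuming a single HWC frame containing one visible person and network access for the `torch.hub` YOLOv5 download; `frame.png` is a placeholder path:

```python
import imageio
import torch

from propose.models.detectors import HRNet

# A single HWC frame with pixel values in 0..255; any image of one person works.
frame = torch.as_tensor(imageio.imread("frame.png"))

# Detects the person (YOLOv5 via torch.hub when detector=None), crops and pads
# to a square, rescales to 256x256, and returns an NCHW batch scaled to [0, 1].
batch = HRNet.preprocess(frame)
print(batch.shape)  # expected: torch.Size([1, 3, 256, 256])
```

The resulting batch is what `pose_estimate` on a loaded `HRNet` instance (e.g. the `ppierzc/cgnf/hrnet:v0` artifact from the README) consumes, returning the predicted 2D keypoints and their heatmap confidences as `preds, maxvals`.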
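
Likewise, `CondGraphFlow.preprocess` (added in `propose/models/flows/CondGraphFlow.py`) converts a 2D pose into the `HeteroData` context graph the flow conditions on: it rescales the keypoints by 0.0139, centres them on the root joint, and flips the y axis. A sketch under the assumption that a `Human36mPose` holding 2D keypoints is accepted; the random matrix is only a stand-in for real MPII-derived detections:

```python
import numpy as np

from propose.models.flows import CondGraphFlow
from propose.poses.human36m import Human36mPose

# Placeholder 2D keypoints in pixel space, shape (frames, joints, 2).
pose_2d = Human36mPose(np.random.rand(1, 17, 2) * 256)

# Rescaled, root-centred, y-flipped, then wrapped into a conditional graph.
graph = CondGraphFlow.preprocess(pose_2d)
```

Note that `preprocess` relies on the new `pose_matrix=None` default of `Human36mPose.__init__` to build the template pose whose `conditional_graph` attaches the 2D context.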
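
Finally, the new helpers in `propose/utils/reproducibility.py` can be combined to stamp experiment logs with provenance information. A minimal sketch; the `run_info` dict is illustrative (e.g. something to merge into a wandb config):

```python
from propose.utils.reproducibility import (
    check_uncommitted_changes,
    get_commit_hash,
    get_package_version,
    set_random_seed,
)

set_random_seed(42)          # seeds random, numpy, and torch; makes cuDNN deterministic
check_uncommitted_changes()  # warns when the working tree is dirty

run_info = {
    "commit": get_commit_hash(),            # 7-character short hash via git rev-parse
    "torch": get_package_version("torch"),  # resolved through pkg_resources
}
print(run_info)
```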