-
Notifications
You must be signed in to change notification settings - Fork 7
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #349 from Visual-Behavior/dev
Dev
- Loading branch information
Showing
18 changed files
with
676 additions
and
74 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,37 +1,62 @@ | ||
# tagged aloception-oss:cuda-11.3.1-pytorch1.10.1-lightning1.4.1

FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
#FROM nvidia/cuda:11.6.0-cudnn8-devel-ubuntu20.04

ARG py=3.9
ARG pytorch=1.13.1
ARG torchvision=0.14.1
ARG torchaudio=0.13.1
ARG pytorch_lightning=1.9.3
# FIX: the original declared "ARG pycyda" (typo) while the conda install line
# below expands ${pycuda}, which therefore resolved to an empty string and
# broke the "pytorch-cuda=" pin. The ARG name now matches its usage.
ARG pycuda=11.7

ARG HOME=/home/aloception

ENV TZ=Europe/Paris
ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get -y update && apt-get -y install sudo

RUN apt-get install -y build-essential nano git wget libgl1-mesa-glx

# gfortran is useful for scipy; libglib2.0-0 is required for aloscene
RUN apt-get install -y gfortran libglib2.0-0

# Create the aloception user (uid 1000) with passwordless sudo
RUN useradd --create-home --uid 1000 --shell /bin/bash aloception \
    && usermod -aG sudo aloception \
    && echo "aloception ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers

# Install Miniconda system-wide under /opt so any user can run it
RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /tmp/miniconda.sh && \
    /bin/bash /tmp/miniconda.sh -b -p /opt/miniconda && \
    rm /tmp/miniconda.sh
ENV CONDA_HOME /opt/miniconda
ENV PATH ${CONDA_HOME}/condabin:${CONDA_HOME}/bin:${PATH}
RUN /bin/bash -c "source activate base"
ENV HOME /workspace
WORKDIR /workspace

# The following so that any user can install packages inside this image
RUN chmod -R o+w /opt/miniconda && chmod -R o+w /home/aloception

USER aloception

# PyTorch & PyTorch Lightning
RUN conda install pytorch==${pytorch} torchvision==${torchvision} torchaudio==${torchaudio} pytorch-cuda=${pycuda} -c pytorch -c nvidia
RUN pip install pytorch_lightning==${pytorch_lightning}

COPY --chown=aloception:aloception requirements/requirements-torch1.13.1.txt /home/aloception/install/requirements-torch1.13.1.txt
RUN pip install -r /home/aloception/install/requirements-torch1.13.1.txt
COPY --chown=aloception:aloception ./aloscene/utils /home/aloception/install/utils

USER root
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
Empty file.
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,177 @@ | ||
import pytorch_lightning as pl | ||
import alonet | ||
import torch | ||
from torch.utils.data.sampler import RandomSampler, SequentialSampler | ||
|
||
|
||
class BaseDataModule(pl.LightningDataModule):
    """Base class for all data modules.

    Child classes are expected to implement :meth:`setup`,
    :meth:`val_transform`, :meth:`_train_transform_no_aug` and
    :meth:`_train_transform_aug`. Hyper-parameters declared in
    :meth:`add_argparse_args` (``batch_size``, ``num_workers``,
    ``sequential_sampler``, ``sample``, ``train_on_val``, ``no_aug``) are
    copied onto the instance by ``alonet.common.pl_helpers.params_update``.
    """

    def __init__(self, args, **kwargs):
        """Initialize the data module.

        Parameters
        ----------
        args : argparse.Namespace
            Parsed command-line arguments (see :meth:`add_argparse_args`).
        **kwargs
            Extra parameters overriding values from ``args``.
        """
        super().__init__()
        # Copies the declared hyper-parameters onto self (self.batch_size, ...).
        alonet.common.pl_helpers.params_update(self, args, kwargs)

    @staticmethod
    def add_argparse_args(parent_parser):
        """Add BaseDataModule arguments to an argparse parser.

        Parameters
        ----------
        parent_parser : argparse.ArgumentParser
            Parser to extend.

        Returns
        -------
        argparse.ArgumentParser
            The same parser, with a "BaseDataModule" argument group added.
        """
        parser = parent_parser.add_argument_group("BaseDataModule")
        parser.add_argument("--batch_size", type=int, default=5, help="Batch size (Default: %(default)s)")
        parser.add_argument(
            "--num_workers", type=int, default=8, help="num_workers to use on the dataset (Default: %(default)s)"
        )
        parser.add_argument("--sequential_sampler", action="store_true", help="sample data sequentially (no shuffle)")
        parser.add_argument(
            "--sample", action="store_true", help="Download a sample for train/val process (Default: %(default)s)"
        )
        parser.add_argument("--train_on_val", action="store_true", help="Train on validation set (Default: %(default)s)")
        parser.add_argument("--no_aug", action="store_true", help="Disable data augmentation (Default: %(default)s)")
        return parent_parser

    @property
    def train_dataset(self):
        # Lazily build the datasets on first access: setup() is expected to
        # assign self._train_dataset / self._val_dataset / self._test_dataset.
        if not hasattr(self, "_train_dataset"):
            self.setup()
        return self._train_dataset

    @train_dataset.setter
    def train_dataset(self, new_dataset):
        self._train_dataset = new_dataset

    @property
    def val_dataset(self):
        if not hasattr(self, "_val_dataset"):
            self.setup()
        return self._val_dataset

    @val_dataset.setter
    def val_dataset(self, new_dataset):
        self._val_dataset = new_dataset

    @property
    def test_dataset(self):
        if not hasattr(self, "_test_dataset"):
            self.setup()
        return self._test_dataset

    @test_dataset.setter
    def test_dataset(self, new_dataset):
        self._test_dataset = new_dataset

    def train_transform(self, frames, **kwargs):
        """Select and apply the train transform function.

        Dispatches to :meth:`_train_transform_no_aug` when ``self.no_aug`` is
        set, otherwise to :meth:`_train_transform_aug`.

        Parameters
        ----------
        frames : aloscene.Frame
            Input frames.

        Returns
        -------
        aloscene.Frame
        """
        if self.no_aug:
            return self._train_transform_no_aug(frames)
        return self._train_transform_aug(frames, **kwargs)

    def _train_transform_no_aug(self, frames):
        """Train transform with no data augmentation.

        Parameters
        ----------
        frames : aloscene.Frame
            Input frames.

        Returns
        -------
        aloscene.Frame
        """
        raise NotImplementedError("Should be implemented in child class.")

    def _train_transform_aug(self, frames, **kwargs):
        """Train transform with data augmentation.

        FIX: now accepts ``**kwargs`` — :meth:`train_transform` forwards its
        keyword arguments here, so the original ``(self, frames)`` signature
        was inconsistent with the dispatcher and misleading for subclasses.

        Parameters
        ----------
        frames : aloscene.Frame
            Input frames.

        Returns
        -------
        aloscene.Frame
        """
        raise NotImplementedError("Should be implemented in child class.")

    def val_transform(self, frames, **kwargs):
        """Val transform.

        Parameters
        ----------
        frames : aloscene.Frame
            Input frames.

        Returns
        -------
        aloscene.Frame
        """
        raise NotImplementedError("Should be implemented in child class.")

    def setup(self, stage=None):
        """:attr:`train_dataset`, :attr:`val_dataset`, :attr:`test_dataset` datasets setup.

        Parameters
        ----------
        stage : str, optional
            Stage either `fit`, `validate`, `test` or `predict`, by default None
        """
        raise NotImplementedError("Should be implemented in child class.")

    def train_dataloader(self, sampler=None):
        """Get train dataloader.

        Parameters
        ----------
        sampler : type[torch.utils.data.Sampler], optional
            Sampler class used to load batches (the original annotation
            ``torch.utils.data`` named a module, not a type), by default None.

        Returns
        -------
        torch.utils.data.DataLoader
            Dataloader for training process.
        """
        if sampler is None:
            sampler = SequentialSampler if self.sequential_sampler else RandomSampler
        return self.train_dataset.train_loader(batch_size=self.batch_size, num_workers=self.num_workers, sampler=sampler)

    def val_dataloader(self, sampler=None):
        """Get val dataloader.

        Parameters
        ----------
        sampler : type[torch.utils.data.Sampler], optional
            Sampler class used to load batches, by default None.

        Returns
        -------
        torch.utils.data.DataLoader
            Dataloader for validation process.
        """
        if sampler is None:
            sampler = SequentialSampler
        # NOTE(review): the dataset exposes a generic ``train_loader`` used for
        # every split here — presumably the aloception dataset API; confirm.
        return self.val_dataset.train_loader(batch_size=self.batch_size, num_workers=self.num_workers, sampler=sampler)

    def test_dataloader(self, sampler=None):
        """Get test dataloader.

        Parameters
        ----------
        sampler : type[torch.utils.data.Sampler], optional
            Sampler class used to load batches, by default None.

        Returns
        -------
        torch.utils.data.DataLoader
            Dataloader for inference process.
        """
        if sampler is None:
            sampler = SequentialSampler
        return self.test_dataset.train_loader(batch_size=self.batch_size, num_workers=self.num_workers, sampler=sampler)
Oops, something went wrong.