diff --git a/.flake8 b/.flake8
new file mode 100644
index 00000000..1393bbfe
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,10 @@
+[flake8]
+max_line_length = 99
+show_source = True
+format = pylint
+extend-ignore = E203,E501
+exclude =
+    .git
+    __pycache__
+    logs/*
+    .vscode/*
\ No newline at end of file
diff --git a/.github/workflows/cicd.yaml b/.github/workflows/cicd.yaml
index 7371105a..95cb134e 100644
--- a/.github/workflows/cicd.yaml
+++ b/.github/workflows/cicd.yaml
@@ -71,7 +71,7 @@ jobs:
           task.task_name=predict

       - name: Check code neatness (linter)
-        run: docker run myria3d flake8
+        run: docker run myria3d python -m flake8

      # Everything ran so we tag the valid docker image to keep it
      # This happens for push events, which are in particular
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 189e2d73..0b1f6019 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
 # main

+### 3.4.11
+- Unification of max length of lines (99) by applying black everywhere.
+
+### 3.4.10
+- Migrate from setup.cfg to pyproject.toml and .flake8.
+
 ### 3.4.9
 - Support edge-case where source LAZ has no valid subtile (i.e. pre_filter=False for all candidate subtiles) during hdf5 creation
diff --git a/myria3d/callbacks/logging_callbacks.py b/myria3d/callbacks/logging_callbacks.py
index 3e84eac0..aa983a54 100644
--- a/myria3d/callbacks/logging_callbacks.py
+++ b/myria3d/callbacks/logging_callbacks.py
@@ -127,7 +127,6 @@ def __init__(
         dist_sync_on_step: bool = False,
         process_group: Optional[Any] = None,
     ) -> None:
-
         self.class_of_interest_idx = class_of_interest_idx

         super().__init__(
diff --git a/myria3d/models/interpolation.py b/myria3d/models/interpolation.py
index 1716358a..7ce062bd 100644
--- a/myria3d/models/interpolation.py
+++ b/myria3d/models/interpolation.py
@@ -47,7 +47,10 @@ def __init__(
         self.probas_to_save = probas_to_save

         # Maps ascending index (0,1,2,...) back to conventionnal LAS classification codes (6=buildings, etc.)
-        self.reverse_mapper: Dict[int, int] = {class_index: class_code for class_index, class_code in enumerate(classification_dict.keys())}
+        self.reverse_mapper: Dict[int, int] = {
+            class_index: class_code
+            for class_index, class_code in enumerate(classification_dict.keys())
+        }

         self.logits: List[torch.Tensor] = []
         self.idx_in_full_cloud_list: List[np.ndarray] = []
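Editorial note on the hunk above: the model predicts class indices 0..C-1, and reverse_mapper turns them back into LAS classification codes, i.e. the keys of classification_dict. A minimal sketch, assuming a hypothetical classification_dict (the real one comes from the dataset config and is not shown in this diff):

# Illustrative only — this classification_dict is a made-up stand-in.
classification_dict = {1: "unclassified", 2: "ground", 6: "building"}
reverse_mapper = {
    class_index: class_code
    for class_index, class_code in enumerate(classification_dict.keys())
}
assert reverse_mapper == {0: 1, 1: 2, 2: 6}  # model output index -> LAS classification code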
@@ -70,7 +73,9 @@ def load_full_las_for_update(self, src_las: str) -> np.ndarray:
         # Copy from Classification to preserve data type
         # Also preserves values of artefacts.
         if self.predicted_classification_channel != "Classification":
-            pipeline |= pdal.Filter.ferry(dimensions=f"Classification=>{self.predicted_classification_channel}")
+            pipeline |= pdal.Filter.ferry(
+                dimensions=f"Classification=>{self.predicted_classification_channel}"
+            )

         if self.entropy_channel:
             pipeline |= pdal.Filter.ferry(dimensions=f"=>{self.entropy_channel}")
@@ -166,7 +171,9 @@ def reduce_predictions_and_save(self, raw_path: str, output_dir: str) -> str:
         out_f = os.path.abspath(out_f)
         log.info(f"Updated LAS ({basename}) will be saved to: \n {output_dir}\n")
         log.info("Saving...")
-        pipeline = pdal.Writer.las(filename=out_f, extra_dims="all", minor_version=4, dataformat_id=8).pipeline(las)
+        pipeline = pdal.Writer.las(
+            filename=out_f, extra_dims="all", minor_version=4, dataformat_id=8
+        ).pipeline(las)
         pipeline.execute()
         log.info("Saved.")

diff --git a/myria3d/models/model.py b/myria3d/models/model.py
index 0177f494..0f837f19 100755
--- a/myria3d/models/model.py
+++ b/myria3d/models/model.py
@@ -62,9 +62,7 @@ def __init__(self, **kwargs):
         # it also allows to access params with 'self.hparams' attribute
         self.save_hyperparameters()

-        neural_net_class = get_neural_net_class(
-            self.hparams.neural_net_class_name
-        )
+        neural_net_class = get_neural_net_class(self.hparams.neural_net_class_name)
         self.model = neural_net_class(**self.hparams.neural_net_hparams)

         self.softmax = nn.Softmax(dim=1)
@@ -100,9 +98,7 @@ def forward(self, batch: Batch) -> torch.Tensor:
             # During evaluation on test data and inference, we interpolate predictions back to original positions
             # KNN is way faster on CPU than on GPU by a 3 to 4 factor.
             logits = logits.cpu()
-            batch_y = self._get_batch_tensor_by_enumeration(
-                batch.idx_in_original_cloud
-            )
+            batch_y = self._get_batch_tensor_by_enumeration(batch.idx_in_original_cloud)
             logits = knn_interpolate(
                 logits.cpu(),
                 batch.copies["pos_sampled_copy"].cpu(),
@@ -139,9 +135,7 @@ def training_step(self, batch: Batch, batch_idx: int) -> dict:
         targets, logits = self.forward(batch)
         self.criterion = self.criterion.to(logits.device)
         loss = self.criterion(logits, targets)
-        self.log(
-            "train/loss", loss, on_step=True, on_epoch=True, prog_bar=False
-        )
+        self.log("train/loss", loss, on_step=True, on_epoch=True, prog_bar=False)

         with torch.no_grad():
             preds = torch.argmax(logits.detach(), dim=1)
@@ -177,9 +171,7 @@ def validation_step(self, batch: Batch, batch_idx: int) -> dict:
         preds = torch.argmax(logits.detach(), dim=1)
         self.val_iou = self.val_iou.to(preds.device)
         self.val_iou(preds, targets)
-        self.log(
-            "val/iou", self.val_iou, on_step=True, on_epoch=True, prog_bar=True
-        )
+        self.log("val/iou", self.val_iou, on_step=True, on_epoch=True, prog_bar=True)
         return {"loss": loss, "logits": logits, "targets": targets}

     def on_validation_epoch_end(self) -> None:
@@ -257,15 +249,8 @@ def configure_optimizers(self):
             "monitor": self.hparams.monitor,
         }

-    def _get_batch_tensor_by_enumeration(
-        self, pos_x: torch.Tensor
-    ) -> torch.Tensor:
+    def _get_batch_tensor_by_enumeration(self, pos_x: torch.Tensor) -> torch.Tensor:
         """Get batch tensor (e.g. [0,0,1,1,2,2,...,B-1,B-1] )
         from shape B,N,... to shape (N,...).
         """
-        return torch.cat(
-            [
-                torch.full((len(sample_pos),), i)
-                for i, sample_pos in enumerate(pos_x)
-            ]
-        )
+        return torch.cat([torch.full((len(sample_pos),), i) for i, sample_pos in enumerate(pos_x)])
diff --git a/myria3d/models/modules/pyg_randla_net.py b/myria3d/models/modules/pyg_randla_net.py
index 43fbd0c7..21b18094 100644
--- a/myria3d/models/modules/pyg_randla_net.py
+++ b/myria3d/models/modules/pyg_randla_net.py
@@ -49,9 +49,7 @@ def __init__(
         self.fp3 = FPModule(1, SharedMLP([256 + 128, 128]))
         self.fp2 = FPModule(1, SharedMLP([128 + 32, 32]))
         self.fp1 = FPModule(1, SharedMLP([32 + 32, d_bottleneck]))
-        self.mlp_classif = SharedMLP(
-            [d_bottleneck, 64, 32], dropout=[0.0, 0.5]
-        )
+        self.mlp_classif = SharedMLP([d_bottleneck, 64, 32], dropout=[0.0, 0.5])
         self.fc_classif = Linear(32, num_classes)

     def forward(self, x, pos, batch, ptr):
@@ -117,9 +115,7 @@ class LocalFeatureAggregation(MessagePassing):
     def __init__(self, channels):
         super().__init__(aggr="add")
         self.mlp_encoder = SharedMLP([10, channels // 2])
-        self.mlp_attention = SharedMLP(
-            [channels, channels], bias=False, act=None, norm=None
-        )
+        self.mlp_attention = SharedMLP([channels, channels], bias=False, act=None, norm=None)
         self.mlp_post_attention = SharedMLP([channels, channels])

     def forward(self, edge_index, x, pos):
@@ -127,9 +123,7 @@ def forward(self, edge_index, x, pos):
         out = self.mlp_post_attention(out)  # N, d_out
         return out

-    def message(
-        self, x_j: Tensor, pos_i: Tensor, pos_j: Tensor, index: Tensor
-    ) -> Tensor:
+    def message(self, x_j: Tensor, pos_i: Tensor, pos_j: Tensor, index: Tensor) -> Tensor:
         """Local Spatial Encoding (locSE) and attentive pooling of features.

         Args:
@@ -146,13 +140,9 @@ def message(
         # Encode local neighboorhod structural information
         pos_diff = pos_j - pos_i
         distance = torch.sqrt((pos_diff * pos_diff).sum(1, keepdim=True))
-        relative_infos = torch.cat(
-            [pos_i, pos_j, pos_diff, distance], dim=1
-        )  # N * K, d
+        relative_infos = torch.cat([pos_i, pos_j, pos_diff, distance], dim=1)  # N * K, d
         local_spatial_encoding = self.mlp_encoder(relative_infos)  # N * K, d
-        local_features = torch.cat(
-            [x_j, local_spatial_encoding], dim=1
-        )  # N * K, 2d
+        local_features = torch.cat([x_j, local_spatial_encoding], dim=1)  # N * K, 2d

         # Attention will weight the different features of x
         # along the neighborhood dimension.
@@ -199,9 +189,7 @@ def forward(self, x, pos, batch):
         return x, pos, batch


-def decimation_indices(
-    ptr: LongTensor, decimation_factor: Number
-) -> Tuple[Tensor, LongTensor]:
+def decimation_indices(ptr: LongTensor, decimation_factor: Number) -> Tuple[Tensor, LongTensor]:
     """Get indices which downsample each point cloud by a decimation factor.

     Decimation happens separately for each cloud to prevent emptying smaller
@@ -225,21 +213,12 @@
     batch_size = ptr.size(0) - 1
     bincount = ptr[1:] - ptr[:-1]
-    decimated_bincount = torch.div(
-        bincount, decimation_factor, rounding_mode="floor"
-    )
+    decimated_bincount = torch.div(bincount, decimation_factor, rounding_mode="floor")
     # Decimation should not empty clouds completely.
-    decimated_bincount = torch.max(
-        torch.ones_like(decimated_bincount), decimated_bincount
-    )
+    decimated_bincount = torch.max(torch.ones_like(decimated_bincount), decimated_bincount)
     idx_decim = torch.cat(
         [
-            (
-                ptr[i]
-                + torch.randperm(bincount[i], device=ptr.device)[
-                    : decimated_bincount[i]
-                ]
-            )
+            (ptr[i] + torch.randperm(bincount[i], device=ptr.device)[: decimated_bincount[i]])
             for i in range(batch_size)
         ],
         dim=0,
     )
@@ -301,15 +280,9 @@
         transform=transform,
         pre_transform=pre_transform,
     )
-    test_dataset = ShapeNet(
-        path, category, split="test", pre_transform=pre_transform
-    )
-    train_loader = DataLoader(
-        train_dataset, batch_size=12, shuffle=True, num_workers=6
-    )
-    test_loader = DataLoader(
-        test_dataset, batch_size=12, shuffle=False, num_workers=6
-    )
+    test_dataset = ShapeNet(path, category, split="test", pre_transform=pre_transform)
+    train_loader = DataLoader(train_dataset, batch_size=12, shuffle=True, num_workers=6)
+    test_loader = DataLoader(test_dataset, batch_size=12, shuffle=False, num_workers=6)

     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     model = PyGRandLANet(3, category_num_classes).to(device)
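Editorial note: to make the behaviour of decimation_indices (touched earlier in this file's diff) concrete, here is a small usage sketch with toy values. It relies only on what is visible in these hunks; the exact indices returned are random by construction.

# Illustrative only — toy offsets for two clouds of 100 and 150 points.
import torch

from myria3d.models.modules.pyg_randla_net import decimation_indices

ptr = torch.tensor([0, 100, 250])  # CSR-style offsets: cloud 0 = [0, 100), cloud 1 = [100, 250)
idx_decim, ptr_decim = decimation_indices(ptr, decimation_factor=4)
# floor(100 / 4) = 25 and floor(150 / 4) = 37 indices are drawn at random per cloud,
# so idx_decim holds 62 point indices; ptr_decim holds the matching decimated offsets.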
diff --git a/myria3d/pctl/datamodule/hdf5.py b/myria3d/pctl/datamodule/hdf5.py
index 3f559ca3..0248ef4d 100644
--- a/myria3d/pctl/datamodule/hdf5.py
+++ b/myria3d/pctl/datamodule/hdf5.py
@@ -64,13 +64,19 @@ def __init__(
         t = transforms
         self.preparation_train_transform: TRANSFORMS_LIST = t.get("preparations_train_list", [])
         self.preparation_eval_transform: TRANSFORMS_LIST = t.get("preparations_eval_list", [])
-        self.preparation_predict_transform: TRANSFORMS_LIST = t.get("preparations_predict_list", [])
+        self.preparation_predict_transform: TRANSFORMS_LIST = t.get(
+            "preparations_predict_list", []
+        )
         self.augmentation_transform: TRANSFORMS_LIST = t.get("augmentations_list", [])
         self.normalization_transform: TRANSFORMS_LIST = t.get("normalizations_list", [])

     @property
     def train_transform(self) -> CustomCompose:
-        return CustomCompose(self.preparation_train_transform + self.normalization_transform + self.augmentation_transform)
+        return CustomCompose(
+            self.preparation_train_transform
+            + self.normalization_transform
+            + self.augmentation_transform
+        )

     @property
     def eval_transform(self) -> CustomCompose:
@@ -85,9 +91,13 @@ def prepare_data(self, stage: Optional[str] = None):

         if stage in ["fit", "test"] or stage is None:
             if self.split_csv_path and self.data_dir:
-                las_paths_by_split_dict = get_las_paths_by_split_dict(self.data_dir, self.split_csv_path)
+                las_paths_by_split_dict = get_las_paths_by_split_dict(
+                    self.data_dir, self.split_csv_path
+                )
             else:
-                log.warning("cfg.data_dir and cfg.split_csv_path are both null. Precomputed HDF5 dataset is used.")
+                log.warning(
+                    "cfg.data_dir and cfg.split_csv_path are both null. Precomputed HDF5 dataset is used."
+                )
                 las_paths_by_split_dict = None
             # Create the dataset in prepare_data, so that it is done one a single GPU.
             self.las_paths_by_split_dict = las_paths_by_split_dict
diff --git a/myria3d/pctl/dataset/copc.py b/myria3d/pctl/dataset/copc.py
index a823031d..ba005e83 100644
--- a/myria3d/pctl/dataset/copc.py
+++ b/myria3d/pctl/dataset/copc.py
@@ -36,20 +36,15 @@ def __init__(
         data_dir=None,
         add_original_index: bool = True,
     ):
-
         if len(tiles_basenames) == 0:
             raise KeyError("Given list of files is empty")

-        processed_basenames = [
-            b.replace(".las", ".copc.laz") for b in tiles_basenames
-        ]
+        processed_basenames = [b.replace(".las", ".copc.laz") for b in tiles_basenames]
         self.copc_paths = [osp.join(copc_dir, b) for b in processed_basenames]

         if data_dir:
             # CONVERSION TO COPC IF NEEDED
-            raw_paths = [
-                find_file_in_dir(data_dir, b) for b in tiles_basenames
-            ]
+            raw_paths = [find_file_in_dir(data_dir, b) for b in tiles_basenames]
             try:
                 # IndexError if no file is found in dir.
                 [find_file_in_dir(copc_dir, b) for b in processed_basenames]
@@ -75,7 +70,6 @@ def load_points(idx) -> np.ndarray:
         raise NotImplementedError()

     def __getitem__(self, idx):
-
         points = self.load_points(idx)

         # filter if empty
@@ -96,9 +90,7 @@ def __getitem__(self, idx):
             data = self.transform(data)

         # filter if empty
-        if data is None or (
-            self.pre_filter is not None and self.pre_filter(data)
-        ):
+        if data is None or (self.pre_filter is not None and self.pre_filter(data)):
             return None

         return data
@@ -245,9 +237,7 @@ def __init__(
         )


-def write_las_to_copc_laz(
-    las_path: str, copc_laz_path: str, add_original_index: bool = False
-):
+def write_las_to_copc_laz(las_path: str, copc_laz_path: str, add_original_index: bool = False):
     """Convert from LAS to COPC, for optimized later loading.

     Resulting data starts at 0 on x and y.
diff --git a/myria3d/pctl/dataset/hdf5.py b/myria3d/pctl/dataset/hdf5.py
index f9b1dbc6..b63960a8 100644
--- a/myria3d/pctl/dataset/hdf5.py
+++ b/myria3d/pctl/dataset/hdf5.py
@@ -74,7 +74,9 @@ def __init__(
         self._samples_hdf5_paths = None

         if not las_paths_by_split_dict:
-            log.warning("No las_paths_by_split_dict given, pre-computed HDF5 dataset is therefore used.")
+            log.warning(
+                "No las_paths_by_split_dict given, pre-computed HDF5 dataset is therefore used."
+            )
             return

         # Add data for all LAS Files into a single hdf5 file.
@@ -168,7 +170,9 @@ def samples_hdf5_paths(self):
         # Load as variable if already indexed in hdf5 file. Need to decode b-string.
         with h5py.File(self.hdf5_file_path, "r") as hdf5_file:
             if "samples_hdf5_paths" in hdf5_file:
-                self._samples_hdf5_paths = [sample_path.decode("utf-8") for sample_path in hdf5_file["samples_hdf5_paths"]]
+                self._samples_hdf5_paths = [
+                    sample_path.decode("utf-8") for sample_path in hdf5_file["samples_hdf5_paths"]
+                ]
                 return self._samples_hdf5_paths

         # Otherwise, index samples, and add the index as an attribute to the HDF5 file.
@@ -203,7 +207,6 @@
     subtile_overlap_train: Number = 0,
     points_pre_transform: Callable = lidar_hd_pre_transform,
 ):
-
     """Create a HDF5 dataset file from las.

     Args:
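Editorial note: to make the intent of samples_hdf5_paths and create_hdf5 more concrete, here is a hypothetical read-back sketch. The file name and the assumption that each stored path points at a per-sample group holding the "x" and "pos" datasets are illustrative guesses, not guaranteed by this diff.

# Hypothetical sketch — "prepared.hdf5" and the group layout are assumptions.
import h5py

with h5py.File("prepared.hdf5", "r") as f:
    sample_paths = [p.decode("utf-8") for p in f["samples_hdf5_paths"]]  # stored as byte strings
    sample = f[sample_paths[0]]
    x, pos = sample["x"][...], sample["pos"][...]
    print(sample["x"].attrs["x_features_names"])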
@@ -227,20 +230,24 @@
             if split not in f:
                 f.create_group(split)
         for las_path in tqdm(las_paths, desc=f"Preparing {split} set..."):
-
             basename = os.path.basename(las_path)
             # Delete dataset for incomplete LAS entry, to start from scratch.
             # Useful in case data preparation was interrupted.
             with h5py.File(hdf5_file_path, "a") as hdf5_file:
-                if basename in hdf5_file[split] and "is_complete" not in hdf5_file[split][basename].attrs:
+                if (
+                    basename in hdf5_file[split]
+                    and "is_complete" not in hdf5_file[split][basename].attrs
+                ):
                     del hdf5_file[split][basename]

             # Parse and add subtiles to split group.
             with h5py.File(hdf5_file_path, "a") as hdf5_file:
                 if basename in hdf5_file[split]:
                     continue

-                subtile_overlap = subtile_overlap_train if split == "train" else 0  # No overlap at eval time.
+                subtile_overlap = (
+                    subtile_overlap_train if split == "train" else 0
+                )  # No overlap at eval time.
                 for sample_number, (sample_idx, sample_points) in enumerate(
                     split_cloud_into_samples(
                         las_path,
@@ -264,7 +271,9 @@
                         dtype="f",
                         data=data.x,
                     )
-                    hdf5_file[hd5f_path_x].attrs["x_features_names"] = copy.deepcopy(data.x_features_names)
+                    hdf5_file[hd5f_path_x].attrs["x_features_names"] = copy.deepcopy(
+                        data.x_features_names
+                    )
                     hdf5_file.create_dataset(
                         os.path.join(hdf5_path, "pos"),
                         data.pos.shape,
diff --git a/myria3d/pctl/dataset/iterable.py b/myria3d/pctl/dataset/iterable.py
index a6e6ea0f..6abdda9d 100644
--- a/myria3d/pctl/dataset/iterable.py
+++ b/myria3d/pctl/dataset/iterable.py
@@ -53,7 +53,9 @@ def get_iterator(self):
         ):
             sample_data = self.points_pre_transform(sample_points)
             sample_data["x"] = torch.from_numpy(sample_data["x"])
-            sample_data["y"] = torch.LongTensor(sample_data["y"])  # Need input classification for DropPointsByClass
+            sample_data["y"] = torch.LongTensor(
+                sample_data["y"]
+            )  # Need input classification for DropPointsByClass
             sample_data["pos"] = torch.from_numpy(sample_data["pos"])
             # for final interpolation - should be kept as a np.ndarray to be batched as a list later.
             sample_data["idx_in_original_cloud"] = idx_in_original_cloud
diff --git a/myria3d/pctl/points_pre_transform/lidar_hd.py b/myria3d/pctl/points_pre_transform/lidar_hd.py
index 922dfc27..dcd7e4ad 100644
--- a/myria3d/pctl/points_pre_transform/lidar_hd.py
+++ b/myria3d/pctl/points_pre_transform/lidar_hd.py
@@ -19,15 +19,11 @@ def lidar_hd_pre_transform(points):
     """
     # Positions and base features
-    pos = np.asarray(
-        [points["X"], points["Y"], points["Z"]], dtype=np.float32
-    ).transpose()
+    pos = np.asarray([points["X"], points["Y"], points["Z"]], dtype=np.float32).transpose()

     # normalization
     occluded_points = points["ReturnNumber"] > 1

-    points["ReturnNumber"] = (points["ReturnNumber"]) / (
-        RETURN_NUMBER_NORMALIZATION_MAX_VALUE
-    )
+    points["ReturnNumber"] = (points["ReturnNumber"]) / (RETURN_NUMBER_NORMALIZATION_MAX_VALUE)
     points["NumberOfReturns"] = (points["NumberOfReturns"]) / (
         RETURN_NUMBER_NORMALIZATION_MAX_VALUE
     )
@@ -46,9 +42,7 @@
     )

     # NDVI
-    ndvi = (points["Infrared"] - points["Red"]) / (
-        points["Infrared"] + points["Red"] + 10**-6
-    )
+    ndvi = (points["Infrared"] - points["Red"]) / (points["Infrared"] + points["Red"] + 10**-6)

     # todo
     x = np.stack(
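Editorial note: the ndvi line above is the usual (NIR - Red) / (NIR + Red) ratio, with a small epsilon to avoid division by zero. A quick numeric check with made-up reflectance values:

# Illustrative only — made-up infrared/red values.
import numpy as np

infrared = np.array([200.0, 50.0])
red = np.array([100.0, 150.0])
ndvi = (infrared - red) / (infrared + red + 10**-6)
# ndvi ≈ [0.333, -0.5]; vegetation pushes the index towards +1, mineral surfaces towards -1.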
diff --git a/myria3d/pctl/transforms/transforms.py b/myria3d/pctl/transforms/transforms.py
index 38852322..22034d60 100755
--- a/myria3d/pctl/transforms/transforms.py
+++ b/myria3d/pctl/transforms/transforms.py
@@ -216,7 +216,10 @@ def _set_preprocessing_mapper(self, classification_preprocessing_dict):

     def _set_mapper(self, classification_dict):
         """Set mapper from source classification code to consecutive integers."""
-        d = {class_code: class_index for class_index, class_code in enumerate(classification_dict.keys())}
+        d = {
+            class_code: class_index
+            for class_index, class_code in enumerate(classification_dict.keys())
+        }
         # Here we update the dict so that code 65 remains unchanged.
         # Indeed, 65 is reserved for noise/artefacts points, that will be deleted by transform "DropPointsByClass".
         d.update({65: 65})
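Editorial note on the d.update({65: 65}) line above: after the mapping, every LAS code becomes a consecutive integer except 65, which is passed through unchanged so that DropPointsByClass can still recognise and remove those noise/artefact points. A minimal sketch, reusing the same hypothetical classification_dict as earlier:

# Illustrative only — hypothetical classification_dict.
classification_dict = {1: "unclassified", 2: "ground", 6: "building"}
d = {
    class_code: class_index
    for class_index, class_code in enumerate(classification_dict.keys())
}
d.update({65: 65})
assert d == {1: 0, 2: 1, 6: 2, 65: 65}  # 65 is left untouched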
diff --git a/myria3d/predict.py b/myria3d/predict.py
index 160238dc..ff43956e 100644
--- a/myria3d/predict.py
+++ b/myria3d/predict.py
@@ -56,7 +56,9 @@ def predict(config: DictConfig) -> str:
         interpolation_k=config.predict.interpolator.interpolation_k,
         classification_dict=config.dataset_description.get("classification_dict"),
         probas_to_save=config.predict.interpolator.probas_to_save,
-        predicted_classification_channel=config.predict.interpolator.get("predicted_classification_channel", "PredictedClassification"),
+        predicted_classification_channel=config.predict.interpolator.get(
+            "predicted_classification_channel", "PredictedClassification"
+        ),
         entropy_channel=config.predict.interpolator.get("entropy_channel", "entropy"),
     )

@@ -65,7 +67,5 @@ def predict(config: DictConfig) -> str:
             logits = model.predict_step(batch)["logits"]
             itp.store_predictions(logits, batch.idx_in_original_cloud)

-    out_f = itp.reduce_predictions_and_save(
-        config.predict.src_las, config.predict.output_dir
-    )
+    out_f = itp.reduce_predictions_and_save(config.predict.src_las, config.predict.output_dir)
     return out_f
diff --git a/myria3d/train.py b/myria3d/train.py
index 50895827..e364990e 100755
--- a/myria3d/train.py
+++ b/myria3d/train.py
@@ -2,7 +2,9 @@
     # It is safer to import comet before all other imports.
     import comet_ml  # noqa
 except ImportError:
-    print("Warning: package comet_ml not found. This may break things if you use a comet callback.")
+    print(
+        "Warning: package comet_ml not found. This may break things if you use a comet callback."
+    )

 import copy
 import os
@@ -146,7 +148,9 @@ def train(config: DictConfig) -> Trainer:
     if task_name in [TASK_NAMES.FIT.value, TASK_NAMES.TEST.value]:
         log.info("Starting testing!")
         if trainer.checkpoint_callback.best_model_path:
-            log.info(f"Test will use just-trained best model checkpointed at \n {trainer.checkpoint_callback.best_model_path}")
+            log.info(
+                f"Test will use just-trained best model checkpointed at \n {trainer.checkpoint_callback.best_model_path}"
+            )
             config.model.ckpt_path = trainer.checkpoint_callback.best_model_path
         log.info(f"Test will use specified model checkpointed at \n {config.model.ckpt_path}")
         trainer.test(model=model, datamodule=datamodule, ckpt_path=config.model.ckpt_path)
@@ -157,7 +161,9 @@ def train(config: DictConfig) -> Trainer:
         # Instantiates the Model but overwrites everything with current config,
         # except module related params (nnet architecture)
         kwargs_to_override = copy.deepcopy(model.hparams)
-        kwargs_to_override.pop(NEURAL_NET_ARCHITECTURE_CONFIG_GROUP, None)  # removes that key if it's there
+        kwargs_to_override.pop(
+            NEURAL_NET_ARCHITECTURE_CONFIG_GROUP, None
+        )  # removes that key if it's there
         model = Model.load_from_checkpoint(config.model.ckpt_path, **kwargs_to_override)
         trainer.fit(model=model, datamodule=datamodule, ckpt_path=None)
         log.info(f"Best checkpoint:\n{trainer.checkpoint_callback.best_model_path}")
diff --git a/package_metadata.yaml b/package_metadata.yaml
index 4898c136..807c8bee 100644
--- a/package_metadata.yaml
+++ b/package_metadata.yaml
@@ -1,4 +1,4 @@
-__version__: "3.4.9"
+__version__: "3.4.11"
 __name__: "myria3d"
 __url__: "https://github.com/IGNF/myria3d"
 __description__: "Deep Learning for the Semantic Segmentation of Aerial Lidar Point Clouds"
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..3df4ef7e
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,39 @@
+[tool.black]
+line-length = 99
+target-version = ['py39']
+include = '\.pyi?$'
+# 'extend-exclude' excludes files or directories in addition to the defaults
+extend-exclude = '''
+# A regex preceded with ^/ will apply only to files and directories
+# in the root of the project.
+(
+  ^/foo.py  # exclude a file named foo.py in the root of the project
+  | .*_pb2.py  # exclude autogenerated Protocol Buffer files anywhere in the project
+)
+'''
+
+[tool.isort]
+profile="black"
+src_paths="myria3d,test"
+
+[tool.pytest.ini_options]
+log_cli = true
+testpaths = [
+    "tests/myria3d/",
+]
+# Add coverage
+addopts = "--cov ./myria3d/ --cov-report html --cov-fail-under 69 --cov-config pyproject.toml"
+
+filterwarnings = [
+    "ignore::DeprecationWarning",
+    "ignore::UserWarning"
+]
+markers = [
+    "slow: marks tests as slow (deselect with '-m \"not slow\"')",
+]
+
+[tool.coverage.run]
+branch = true
+
+[tool.coverage.report]
+show_missing = true
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 30e36eb2..00000000
--- a/setup.cfg
+++ /dev/null
@@ -1,49 +0,0 @@
-[metadata]
-project_name = "Myria3D"
-author = "Charles GAYDON"
-contact = "charles.gaydon@gmail.com"
-license_file = LICENSE
-description_file = README.md
-project_template = https://github.com/ashleve/lightning-hydra-template
-
-[isort]
-line_length = 99
-profile = black
-filter_files = True
-
-[flake8]
-max_line_length = 140
-show_source = True
-format = pylint
-extend-ignore = E203
-exclude =
-    .git
-    __pycache__
-    logs/*
-    .vscode/*
-
-[tool:pytest]
-python_files = tests/*
-log_cli = True
-addopts =
-    # Always use coverage when running pytest
-    --cov "./myria3d/"
-    # Percentage under which coverage is not reached
-    --cov-fail-under 69
-    --cov-report html
-    # Needed for pytest to have access to it even though hydra changes current working directory
-    # and we run subprocess in some tests.
-    # see https://github.com/nedbat/coveragepy/issues/512#issuecomment-399707938
-    --cov-config setup.cfg
-markers =
-    slow: marks tests as slow (deselect with '-m "not slow"')
-filterwarnings =
-    ignore::DeprecationWarning
-    ignore::UserWarning
-
-[coverage:run]
-branch = True
-# omit = file1
-
-[coverage:report]
-show_missing = True
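Editorial note: the [tool.pytest.ini_options] table above registers the same slow marker that the deleted setup.cfg used to declare. A hypothetical test (not part of this diff) opts in like this and is deselected by -m "not slow":

# Hypothetical example of the "slow" marker registered in pyproject.toml.
import pytest


@pytest.mark.slow
def test_full_training_loop():
    ...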
diff --git a/tests/myria3d/models/modules/test_randla_nets.py b/tests/myria3d/models/modules/test_randla_nets.py
index 4b11b5b2..d128209f 100644
--- a/tests/myria3d/models/modules/test_randla_nets.py
+++ b/tests/myria3d/models/modules/test_randla_nets.py
@@ -5,9 +5,7 @@
 from myria3d.models.modules.pyg_randla_net import PyGRandLANet


-@pytest.mark.parametrize(
-    "num_nodes", [[12500, 12500], [50, 50], [12500, 10000]]
-)
+@pytest.mark.parametrize("num_nodes", [[12500, 12500], [50, 50], [12500, 10000]])
 def test_fake_run_pyg_randlanet(num_nodes):
     """Documents expected data format and make a forward pass with PyG RandLa-Net.
diff --git a/tests/myria3d/pctl/dataset/test_utils.py b/tests/myria3d/pctl/dataset/test_utils.py
index 56a5991d..74205acf 100644
--- a/tests/myria3d/pctl/dataset/test_utils.py
+++ b/tests/myria3d/pctl/dataset/test_utils.py
@@ -9,9 +9,7 @@
     zip([1000], [50], [25]),
 )
 def test_get_mosaic_of_centers(tile_width, subtile_width, subtile_overlap):
-    mosaic = get_mosaic_of_centers(
-        tile_width, subtile_width, subtile_overlap=subtile_overlap
-    )
+    mosaic = get_mosaic_of_centers(tile_width, subtile_width, subtile_overlap=subtile_overlap)
     for s in np.stack(mosaic).transpose():
         assert min(s - subtile_width / 2) <= 0
         assert max(s + subtile_width / 2) <= 1000