diff --git a/README.md b/README.md index a6bdba0..ec5bcc5 100644 --- a/README.md +++ b/README.md @@ -49,6 +49,35 @@ $ python -c "import JackFramework as jf; print(jf.version())" you can find the template project in: https://github.com/Archaic-Atom/FameworkTemplate +**Related arguments for the training or testing process** +| Args | Type | Description | Default | +|:-------------:|:-------:|:--------------------------------:|:---------------:| +| mode | [str] | train or test | train | +| gpu | [int] | the number of gpus | 2 | +| auto_save_num | [int] | the interval between saves | 1 | +| dataloaderNum | [int] | the number of dataloaders | 8 | +| pretrain | [bool] | is a new training process? | False | +| ip | [str] | used for distributed training | 127.0.0.1 | +| port | [str] | used for distributed training | 8086 | +| dist | [bool] | distributed training (DDP) | True | +| trainListPath | [str] | the list for training or testing | ./Datasets/*.csv| +| valListPath | [str] | the list for the validation process | ./Datasets/*.csv| +| outputDir | [str] | the folder for log files | ./Result/ | +| modelDir | [str] | the folder for saving models | ./Checkpoint/ | +| resultImgDir | [str] | the folder for output | ./ResultImg/ | +| log | [str] | the folder for tensorboard | ./log/ | +| sampleNum | [int] | the number of samples for data | 1 | +| batchSize | [int] | batch size | 4 | +| lr | [float]| learning rate | 0.001 | +| maxEpochs | [int] | the number of training epochs | 30 | +| imgWidth | [int] | the cropped width | 512 | +| imgHeight | [int] | the cropped height | 256 | +| imgNum | [int] | the number of images for training | 35354 | +| valImgNum | [int] | the number of images for val | 200 | +| modelName | [str] | the model's name | NLCA-Net | +| dataset | [str] | the dataset's name | SceneFlow | + + **5) Clean the project (if you want to clean generated files)** ``` $ ./clean.sh diff --git a/Source/JackFramework/Evaluation/accuracy.py b/Source/JackFramework/Evaluation/accuracy.py index 
05e05c4..a72d888 100644 --- a/Source/JackFramework/Evaluation/accuracy.py +++ b/Source/JackFramework/Evaluation/accuracy.py @@ -40,18 +40,18 @@ def r2_score(res: tensor, gt: tensor) -> tensor: gt_mean = torch.mean(gt) ss_tot = torch.sum((gt - gt_mean) ** 2) ss_res = torch.sum((gt - res) ** 2) - r2 = 1 - ss_res / ss_tot + r2 = ss_res / ss_tot return r2 @staticmethod - def rmse_score(res: tensor, gt: tensor) -> tensor: - return torch.sqrt(torch.mean((res - gt)**2)) + def rmspe_score(res: tensor, gt: tensor) -> tensor: + return torch.sqrt(torch.mean((res - gt)**2 / gt)) def debug_main(): pred = torch.rand(10, 600) gt = torch.rand(10, 600) - out = Accuracy.rmse_score(pred, gt) + out = Accuracy.r2_score(pred, gt) print(out) diff --git a/Source/JackFramework/NN/layer.py b/Source/JackFramework/NN/layer.py index 6b3e393..ec84abb 100644 --- a/Source/JackFramework/NN/layer.py +++ b/Source/JackFramework/NN/layer.py @@ -52,6 +52,25 @@ def norm_act_layer(layer: list, out_channels: int, return layer + @staticmethod + def conv_1d_layer(in_channels: int, out_channels: int, kernel_size: int, + stride: int = 1, padding: int = 1, dilation: int = 1, + bias: bool = False, norm: bool = True, + act: bool = True) -> object: + layer = [ + Ops.conv_1d( + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + bias=bias, + ) + ] + layer = Layer.norm_act_layer(layer, out_channels, norm, act) + return nn.Sequential(*layer) + @staticmethod def conv_2d_layer(in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, padding: int = 1, dilation: int = 1, diff --git a/Source/JackFramework/NN/ops.py b/Source/JackFramework/NN/ops.py index a37785e..2429374 100644 --- a/Source/JackFramework/NN/ops.py +++ b/Source/JackFramework/NN/ops.py @@ -15,6 +15,15 @@ def __new__(cls, *args: str, **kwargs: str) -> object: cls.__OPS = object.__new__(cls) return cls.__OPS + @staticmethod + def conv_1d(in_channels: int, out_channels: int, kernel_size: int, + stride: int = 1, 
padding: int = 0, dilation: int = 1, + groups: int = 1, bias: bool = False, + padding_mode: str = 'circular') -> object: + return nn.Conv1d(in_channels, out_channels, kernel_size, + stride, padding, dilation, groups, + bias, padding_mode) + @staticmethod def conv_2d(in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, padding: int = 0, dilation: int = 1,