diff --git a/configs/classification/mobnetv2_1.0_1bx16_300e_cifar10.py b/configs/classification/mobnetv2_1.0_1bx16_300e_cifar10.py
index ecf3f94c..3fa144f5 100644
--- a/configs/classification/mobnetv2_1.0_1bx16_300e_cifar10.py
+++ b/configs/classification/mobnetv2_1.0_1bx16_300e_cifar10.py
@@ -16,7 +16,7 @@
 
 # optimizer
 lr = 0.01
-epochs = 100
+epochs = 300
 
 model = dict(
     type='edgelab.ImageClassifier',
@@ -93,7 +93,7 @@
         type='MultiStepLR',
         begin=1,
         end=500,
-        milestones=[30, 60, 90],
+        milestones=[100, 200, 250],
         gamma=0.1,
         by_epoch=True,
     ),
diff --git a/configs/classification/mobnetv2_1.0_1bx16_300e_custom.py b/configs/classification/mobnetv2_1.0_1bx16_300e_custom.py
index 2736d363..53d54770 100644
--- a/configs/classification/mobnetv2_1.0_1bx16_300e_custom.py
+++ b/configs/classification/mobnetv2_1.0_1bx16_300e_custom.py
@@ -7,7 +7,7 @@
 
 # dataset settings
 dataset_type = 'mmcls.CustomDataset'
-data_root = 'datasets/digit'
+data_root = ''
 height = 96
 width = 96
 batch_size = 32
@@ -16,7 +16,7 @@
 
 # optimizer
 lr = 0.01
-epochs = 100
+epochs = 300
 
 data_preprocessor = dict(
     type='mmcls.ClsDataPreprocessor',
@@ -85,7 +85,7 @@
 test_dataloader = val_dataloader
 
 # evaluator
-val_evaluator = dict(type='mmcls.Accuracy', topk=(1, 5))
+val_evaluator = dict(type='mmcls.Accuracy', topk=1)
 test_evaluator = val_evaluator
 
 
diff --git a/configs/classification/mobnetv2_1.0_rep_1bx16_300e_cifar10.py b/configs/classification/mobnetv2_1.0_rep_1bx16_300e_cifar10.py
new file mode 100644
index 00000000..c92b6c92
--- /dev/null
+++ b/configs/classification/mobnetv2_1.0_rep_1bx16_300e_cifar10.py
@@ -0,0 +1,104 @@
+_base_ = '../_base_/default_runtime_cls.py'
+default_scope = 'edgelab'
+custom_imports = dict(imports=['edgelab'], allow_failed_imports=False)
+
+# model settings
+num_classes = 10
+
+# dataset settings
+dataset_type = 'mmcls.CIFAR10'
+data_root = 'datasets'
+height = 32
+width = 32
+batch_size = 32
+workers = 8
+persistent_workers = True
+
+# optimizer
+lr = 0.01
+epochs = 300
+
+model = dict(
+    type='edgelab.ImageClassifier',
+    data_preprocessor=dict(type='mmdet.DetDataPreprocessor', mean=[0.0, 0.0, 0.0], std=[255.0, 255.0, 255.0]),
+    backbone=dict(type='MobileNetv2', widen_factor=1, rep=True),
+    neck=dict(type='mmcls.GlobalAveragePooling'),
+    head=dict(
+        type='mmcls.LinearClsHead',
+        in_channels=64,
+        num_classes=num_classes,
+        loss=dict(type='mmcls.CrossEntropyLoss', loss_weight=1.0),
+        topk=(1, 5),
+    ),
+)
+
+train_pipeline = [
+    dict(type='mmengine.Resize', scale=(height, width)),
+    dict(type='mmcls.ColorJitter', brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
+    dict(type='mmcls.Rotate', angle=30.0, prob=0.6),
+    dict(type='mmcls.RandomFlip', prob=0.5, direction='horizontal'),
+    dict(type='mmcls.PackClsInputs'),
+]
+
+test_pipeline = [
+    dict(type='mmengine.Resize', scale=(height, width)),
+    dict(type='mmcls.PackClsInputs'),
+]
+
+train_dataloader = dict(
+    # Training dataset configurations
+    batch_size=batch_size,
+    num_workers=workers,
+    persistent_workers=persistent_workers,
+    dataset=dict(
+        type=dataset_type,
+        data_root=data_root,
+        data_prefix='cifar10/',
+        test_mode=False,
+        pipeline=train_pipeline,
+    ),
+    sampler=dict(type='DefaultSampler', shuffle=True),
+)
+
+val_dataloader = dict(
+    batch_size=batch_size,
+    num_workers=workers,
+    persistent_workers=persistent_workers,
+    dataset=dict(
+        type=dataset_type,
+        data_root=data_root,
+        data_prefix='cifar10/',
+        test_mode=True,
+        pipeline=test_pipeline,
+    ),
+    sampler=dict(type='DefaultSampler', shuffle=False),
+)
+
+test_dataloader = val_dataloader
+
+# evaluator
+val_evaluator = dict(type='mmcls.Accuracy', topk=(1, 5))
+test_evaluator = val_evaluator
+
+
+val_cfg = dict()
+test_cfg = dict()
+
+# optimizer
+optim_wrapper = dict(optimizer=dict(type='SGD', lr=lr, momentum=0.9, weight_decay=0.0001))
+# learning policy
+param_scheduler = [
+    dict(type='LinearLR', begin=0, end=30, start_factor=0.001, by_epoch=False),  # warm-up
+    dict(
+        type='MultiStepLR',
+        begin=1,
+        end=500,
+        milestones=[100, 200, 250],
+        gamma=0.1,
+        by_epoch=True,
+    ),
+]
+
+auto_scale_lr = dict(base_batch_size=batch_size)
+
+train_cfg = dict(by_epoch=True, max_epochs=epochs, val_interval=5)
diff --git a/configs/classification/mobnetv2_1.0_rep_1bx16_300e_custom.py b/configs/classification/mobnetv2_1.0_rep_1bx16_300e_custom.py
new file mode 100644
index 00000000..fdc92c7a
--- /dev/null
+++ b/configs/classification/mobnetv2_1.0_rep_1bx16_300e_custom.py
@@ -0,0 +1,120 @@
+_base_ = '../_base_/default_runtime_cls.py'
+default_scope = 'edgelab'
+custom_imports = dict(imports=['edgelab'], allow_failed_imports=False)
+
+# model settings
+num_classes = 7
+
+gray = True
+# dataset settings
+dataset_type = 'mmcls.CustomDataset'
+data_root = ''
+height = 96
+width = 96
+batch_size = 32
+workers = 8
+persistent_workers = True
+
+# optimizer
+lr = 0.05
+epochs = 300
+
+data_preprocessor = dict(
+    type='mmcls.ClsDataPreprocessor',
+    mean=[0, 0, 0],
+    std=[255.0, 255.0, 255.0],
+    to_rgb=True,
+)
+
+model = dict(
+    type='edgelab.ImageClassifier',
+    data_preprocessor=dict(
+        type='mmdet.DetDataPreprocessor',
+        mean=[0.0] if gray else [0.0, 0.0, 0.0],
+        std=[255.0] if gray else [255.0, 255.0, 255.0],
+    ),
+    backbone=dict(type='MobileNetv2', widen_factor=1.0, rep=True, gray_input=gray),
+    neck=dict(type='mmcls.GlobalAveragePooling'),
+    head=dict(
+        type='mmcls.LinearClsHead',
+        in_channels=128,
+        num_classes=num_classes,
+        loss=dict(type='mmcls.CrossEntropyLoss', loss_weight=1.0),
+        topk=(1, 5),
+    ),
+)
+
+
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='mmengine.Resize', scale=(height, width)),
+    dict(type='mmcls.ColorJitter', brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
+    dict(type='mmcls.Rotate', angle=30.0, prob=0.6),
+    dict(type='mmcls.RandomFlip', prob=0.5, direction='horizontal'),
+    dict(type='mmcls.PackClsInputs'),
+]
+
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='mmengine.Resize', scale=(height, width)),
+    dict(type='mmcls.PackClsInputs'),
+]
+if gray:
+    train_pipeline.insert(-2, dict(type='Color2Gray', one_channel=True))
+    test_pipeline.insert(-2, dict(type='Color2Gray', one_channel=True))
+
+train_dataloader = dict(
+    # Training dataset configurations
+    batch_size=batch_size,
+    num_workers=workers,
+    persistent_workers=persistent_workers,
+    dataset=dict(
+        type=dataset_type,
+        data_root=data_root,
+        data_prefix='train/',
+        pipeline=train_pipeline,
+    ),
+    sampler=dict(type='DefaultSampler', shuffle=True),
+)
+
+val_dataloader = dict(
+    batch_size=batch_size,
+    num_workers=workers,
+    persistent_workers=persistent_workers,
+    dataset=dict(
+        type=dataset_type,
+        data_root=data_root,
+        data_prefix='valid/',
+        pipeline=test_pipeline,
+    ),
+    sampler=dict(type='DefaultSampler', shuffle=False),
+)
+
+test_dataloader = val_dataloader
+
+# evaluator
+val_evaluator = dict(type='mmcls.Accuracy', topk=1)
+test_evaluator = val_evaluator
+
+
+val_cfg = dict()
+test_cfg = dict()
+
+# optimizer
+optim_wrapper = dict(optimizer=dict(type='SGD', lr=lr, momentum=0.95, weight_decay=0.0005))
+# learning policy
+param_scheduler = [
+    dict(type='LinearLR', begin=0, end=30, start_factor=0.001, by_epoch=False),  # warm-up
+    dict(
+        type='MultiStepLR',
+        begin=1,
+        end=500,
+        milestones=[100, 200, 250],
+        gamma=0.1,
+        by_epoch=True,
+    ),
+]
+
+auto_scale_lr = dict(base_batch_size=batch_size)
+
+train_cfg = dict(by_epoch=True, max_epochs=epochs, val_interval=1)
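
As a quick sanity check of the two new rep configs (a suggested snippet, not part of this diff), they can be parsed with mmengine's `Config` API to confirm the 300-epoch schedule and the new LR milestones; this sketch assumes it is run from the repository root with mmengine installed.

```python
# Hypothetical sanity check, not part of this PR: parse the new rep CIFAR-10
# config and print a few of the fields added/changed above. Assumes the
# repository root is the working directory and mmengine is installed.
from mmengine.config import Config

cfg = Config.fromfile('configs/classification/mobnetv2_1.0_rep_1bx16_300e_cifar10.py')

print(cfg.model.backbone)                    # MobileNetv2 with rep=True
print(cfg.train_cfg.max_epochs)              # 300
print(cfg.param_scheduler[1]['milestones'])  # [100, 200, 250]
```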