add mobilenet rep for custom
mjq2020 committed Aug 19, 2023
1 parent bd5d5ce commit 679aaa1
Showing 4 changed files with 229 additions and 5 deletions.
4 changes: 2 additions & 2 deletions configs/classification/mobnetv2_1.0_1bx16_300e_cifar10.py
@@ -16,7 +16,7 @@

# optimizer
lr = 0.01
epochs = 100
epochs = 300

model = dict(
type='edgelab.ImageClassifier',
@@ -93,7 +93,7 @@
type='MultiStepLR',
begin=1,
end=500,
milestones=[30, 60, 90],
milestones=[100, 200, 250],
gamma=0.1,
by_epoch=True,
),
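For reference, the updated milestones give the following decay pattern over the 300-epoch run, assuming the standard MultiStepLR behaviour of multiplying the learning rate by gamma at each milestone (the short LinearLR warm-up is ignored here); this is an illustrative sketch only:

base_lr = 0.01
gamma = 0.1
milestones = [100, 200, 250]

def lr_at_epoch(epoch):
    # Decay once for every milestone already passed.
    passed = sum(1 for m in milestones if epoch >= m)
    return base_lr * gamma ** passed

# epochs   0-99 : 0.01
# epochs 100-199: 0.001
# epochs 200-249: 0.0001
# epochs 250-299: 0.00001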
6 changes: 3 additions & 3 deletions configs/classification/mobnetv2_1.0_1bx16_300e_custom.py
@@ -7,7 +7,7 @@

# dataset settings
dataset_type = 'mmcls.CustomDataset'
data_root = 'datasets/digit'
data_root = ''
height = 96
width = 96
batch_size = 32
@@ -16,7 +16,7 @@

# optimizer
lr = 0.01
epochs = 100
epochs = 300

data_preprocessor = dict(
type='mmcls.ClsDataPreprocessor',
@@ -85,7 +85,7 @@
test_dataloader = val_dataloader

# evaluator
val_evaluator = dict(type='mmcls.Accuracy', topk=(1, 5))
val_evaluator = dict(type='mmcls.Accuracy', topk=1)
test_evaluator = val_evaluator


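Since data_root now ships empty, a dataset path has to be supplied before this config can train. A minimal sketch of one way to do that through MMEngine's Runner, assuming the config can be driven directly this way; the dataset path and work_dir are placeholders:

from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile('configs/classification/mobnetv2_1.0_1bx16_300e_custom.py')

# data_root is interpolated into the dataloaders at parse time, so override
# the nested fields rather than the top-level variable.
for name in ('train_dataloader', 'val_dataloader', 'test_dataloader'):
    getattr(cfg, name)['dataset']['data_root'] = 'datasets/my_dataset'  # placeholder path

cfg.work_dir = 'work_dirs/mobnetv2_custom'  # placeholder
Runner.from_cfg(cfg).train()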
104 changes: 104 additions & 0 deletions configs/classification/mobnetv2_1.0_rep_1bx16_300e_cifar10.py
@@ -0,0 +1,104 @@
_base_ = '../_base_/default_runtime_cls.py'
default_scope = 'edgelab'
custom_imports = dict(imports=['edgelab'], allow_failed_imports=False)

# model settings
num_classes = 10

# dataset settings
dataset_type = 'mmcls.CIFAR10'
data_root = 'datasets'
height = 32
width = 32
batch_size = 32
workers = 8
persistent_workers = True

# optimizer
lr = 0.01
epochs = 300

model = dict(
type='edgelab.ImageClassifier',
data_preprocessor=dict(type='mmdet.DetDataPreprocessor', mean=[0.0, 0.0, 0.0], std=[255.0, 255.0, 255.0]),
backbone=dict(type='MobileNetv2', widen_factor=1, rep=True),
neck=dict(type='mmcls.GlobalAveragePooling'),
head=dict(
type='mmcls.LinearClsHead',
in_channels=64,
num_classes=num_classes,
loss=dict(type='mmcls.CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
),
)

train_pipeline = [
dict(type='mmengine.Resize', scale=(height, width)),
dict(type='mmcls.ColorJitter', brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
dict(type='mmcls.Rotate', angle=30.0, prob=0.6),
dict(type='mmcls.RandomFlip', prob=0.5, direction='horizontal'),
dict(type='mmcls.PackClsInputs'),
]

test_pipeline = [
dict(type='mmengine.Resize', scale=(height, width)),
dict(type='mmcls.PackClsInputs'),
]

train_dataloader = dict(
# Training dataset configurations
batch_size=batch_size,
num_workers=workers,
persistent_workers=persistent_workers,
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix='cifar10/',
test_mode=False,
pipeline=train_pipeline,
),
sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
batch_size=batch_size,
num_workers=workers,
persistent_workers=persistent_workers,
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix='cifar10/',
test_mode=True,
pipeline=test_pipeline,
),
sampler=dict(type='DefaultSampler', shuffle=False),
)

test_dataloader = val_dataloader

# evaluator
val_evaluator = dict(type='mmcls.Accuracy', topk=(1, 5))
test_evaluator = val_evaluator


val_cfg = dict()
test_cfg = dict()

# optimizer
optim_wrapper = dict(optimizer=dict(type='SGD', lr=lr, momentum=0.9, weight_decay=0.0001))
# learning policy
param_scheduler = [
dict(type='LinearLR', begin=0, end=30, start_factor=0.001, by_epoch=False), # warm-up
dict(
type='MultiStepLR',
begin=1,
end=500,
milestones=[100, 200, 250],
gamma=0.1,
by_epoch=True,
),
]

auto_scale_lr = dict(base_batch_size=batch_size)

train_cfg = dict(by_epoch=True, max_epochs=epochs, val_interval=5)
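This new config enables rep=True on the backbone. Reparameterizable ("rep") blocks are generally trained with extra conv+BN branches that get folded into single convolutions for inference; the snippet below sketches only the generic conv/BN folding arithmetic behind that idea and is not the edgelab MobileNetv2 implementation:

import torch
import torch.nn as nn

def fuse_conv_bn(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
    # Fold a trained BatchNorm into the preceding convolution so inference
    # runs a single conv (the core trick behind reparameterized backbones).
    fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                      stride=conv.stride, padding=conv.padding,
                      groups=conv.groups, bias=True)
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    bias = conv.bias if conv.bias is not None else torch.zeros(conv.out_channels)
    fused.weight.data = conv.weight * scale.reshape(-1, 1, 1, 1)
    fused.bias.data = (bias - bn.running_mean) * scale + bn.bias
    return fused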
120 changes: 120 additions & 0 deletions configs/classification/mobnetv2_1.0_rep_1bx16_300e_custom.py
@@ -0,0 +1,120 @@
_base_ = '../_base_/default_runtime_cls.py'
default_scope = 'edgelab'
custom_imports = dict(imports=['edgelab'], allow_failed_imports=False)

# model settings
num_classes = 7

gray = True
# dataset settings
dataset_type = 'mmcls.CustomDataset'
data_root = ''
height = 96
width = 96
batch_size = 32
workers = 8
persistent_workers = True

# optimizer
lr = 0.05
epochs = 300

data_preprocessor = dict(
type='mmcls.ClsDataPreprocessor',
mean=[0, 0, 0],
std=[255.0, 255.0, 255.0],
to_rgb=True,
)

model = dict(
type='edgelab.ImageClassifier',
data_preprocessor=dict(
type='mmdet.DetDataPreprocessor',
mean=[0.0] if gray else [0.0, 0.0, 0.0],
std=[255.0] if gray else [255.0, 255.0, 255.0],
),
backbone=dict(type='MobileNetv2', widen_factor=1.0, rep=True, gray_input=gray),
neck=dict(type='mmcls.GlobalAveragePooling'),
head=dict(
type='mmcls.LinearClsHead',
in_channels=128,
num_classes=num_classes,
loss=dict(type='mmcls.CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
),
)


train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmengine.Resize', scale=(height, width)),
dict(type='mmcls.ColorJitter', brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
dict(type='mmcls.Rotate', angle=30.0, prob=0.6),
dict(type='mmcls.RandomFlip', prob=0.5, direction='horizontal'),
dict(type='mmcls.PackClsInputs'),
]

test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='mmengine.Resize', scale=(height, width)),
dict(type='mmcls.PackClsInputs'),
]
if gray:
train_pipeline.insert(-2, dict(type='Color2Gray', one_channel=True))
test_pipeline.insert(-2, dict(type='Color2Gray', one_channel=True))

train_dataloader = dict(
# Training dataset configurations
batch_size=batch_size,
num_workers=workers,
persistent_workers=persistent_workers,
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix='train/',
pipeline=train_pipeline,
),
sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
batch_size=batch_size,
num_workers=workers,
persistent_workers=persistent_workers,
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix='valid/',
pipeline=test_pipeline,
),
sampler=dict(type='DefaultSampler', shuffle=False),
)

test_dataloader = val_dataloader

# evaluator
val_evaluator = dict(type='mmcls.Accuracy', topk=1)
test_evaluator = val_evaluator


val_cfg = dict()
test_cfg = dict()

# optimizer
optim_wrapper = dict(optimizer=dict(type='SGD', lr=lr, momentum=0.95, weight_decay=0.0005))
# learning policy
param_scheduler = [
dict(type='LinearLR', begin=0, end=30, start_factor=0.001, by_epoch=False), # warm-up
dict(
type='MultiStepLR',
begin=1,
end=500,
milestones=[100, 200, 250],
gamma=0.1,
by_epoch=True,
),
]

auto_scale_lr = dict(base_batch_size=batch_size)

train_cfg = dict(by_epoch=True, max_epochs=epochs, val_interval=1)
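Like the non-rep custom config, this one ships with data_root empty and reads images from data_prefix 'train/' and 'valid/'. When used without an annotation file, mmcls.CustomDataset typically expects a folder-per-class layout under each prefix; a small sanity-check sketch, with an assumed dataset path:

from pathlib import Path

# Expected layout (placeholder root):
#   datasets/my_dataset/
#       train/
#           class_0/  img001.jpg ...
#           class_1/  ...
#       valid/
#           class_0/  ...
#           class_1/  ...
data_root = Path('datasets/my_dataset')  # assumed; the config ships with data_root = ''
for split in ('train', 'valid'):
    classes = sorted(p.name for p in (data_root / split).iterdir() if p.is_dir())
    images = sum(1 for p in (data_root / split).rglob('*') if p.is_file())
    print(f'{split}: {len(classes)} classes, {images} images')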
