from __future__ import division
import os.path as osp
import sys
import argparse

from tqdm import tqdm

import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
# from torch.nn.parallel import DistributedDataParallel

from config import config
from dataloader import get_train_loader
from network import BiSeNet
from datasets import Cityscapes
from utils.init_func import init_weight, group_weight
from utils.pyt_utils import all_reduce_tensor
from engine.lr_policy import PolyLR
from engine.engine import Engine
from seg_opr.loss_opr import SigmoidFocalLoss, ProbOhemCrossEntropy2d
try:
    from apex.parallel import DistributedDataParallel, SyncBatchNorm
except ImportError:
    raise ImportError(
        "Please install apex from https://www.github.com/nvidia/apex .")

parser = argparse.ArgumentParser()

with Engine(custom_parser=parser) as engine:
    args = parser.parse_args()

    cudnn.benchmark = True

    # seed the RNGs; under distributed training each rank seeds with its
    # own local_rank so data augmentation differs across processes
    seed = config.seed
    if engine.distributed:
        seed = engine.local_rank
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    # data loader
    train_loader, train_sampler = get_train_loader(engine, Cityscapes)

    # config network and criterion: OHEM keeps at least 1/16 of the
    # per-GPU pixels as hard examples each step
    min_kept = int(config.batch_size // len(engine.devices)
                   * config.image_height * config.image_width // 16)
    criterion = ProbOhemCrossEntropy2d(ignore_label=255, thresh=0.7,
                                       min_kept=min_kept,
                                       use_weight=False)
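    # Worked example with hypothetical config values (batch_size=16,
    # 2 GPUs, 768x768 crops): 16 // 2 * 768 * 768 // 16 = 294912 pixels,
    # i.e. 1/16 of the pixels in one GPU's share of a batch.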
    if engine.distributed:
        BatchNorm2d = SyncBatchNorm
    else:
        # fall back to plain batch norm for single-process runs;
        # without this branch BatchNorm2d is undefined below
        BatchNorm2d = nn.BatchNorm2d
    model = BiSeNet(config.num_classes, is_training=True,
                    criterion=criterion,
                    pretrained_model=config.pretrained_model,
                    norm_layer=BatchNorm2d)
    init_weight(model.business_layer, nn.init.kaiming_normal_,
                BatchNorm2d, config.bn_eps, config.bn_momentum,
                mode='fan_in', nonlinearity='relu')
    # group weight and config optimizer: the pretrained context path
    # trains at base_lr, the newly initialized modules at 10x base_lr
    base_lr = config.lr
    # if engine.distributed:
    #     base_lr = config.lr * engine.world_size

    params_list = []
    params_list = group_weight(params_list, model.context_path,
                               BatchNorm2d, base_lr)
    params_list = group_weight(params_list, model.spatial_path,
                               BatchNorm2d, base_lr * 10)
    params_list = group_weight(params_list, model.global_context,
                               BatchNorm2d, base_lr * 10)
    params_list = group_weight(params_list, model.arms,
                               BatchNorm2d, base_lr * 10)
    params_list = group_weight(params_list, model.refines,
                               BatchNorm2d, base_lr * 10)
    params_list = group_weight(params_list, model.heads,
                               BatchNorm2d, base_lr * 10)
    params_list = group_weight(params_list, model.ffm,
                               BatchNorm2d, base_lr * 10)
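    # Assumption (consistent with the per-group lr update inside the
    # training loop): each group_weight call appends two param groups --
    # decayed weights and non-decayed biases/BN -- so groups 0 and 1 are
    # the 1x-lr context path and every later group is a 10x-lr module.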
    optimizer = torch.optim.SGD(params_list,
                                lr=base_lr,
                                momentum=config.momentum,
                                weight_decay=config.weight_decay)

    # config lr policy
    total_iteration = config.nepochs * config.niters_per_epoch
    lr_policy = PolyLR(base_lr, config.lr_power, total_iteration)
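    # PolyLR is assumed to implement the standard "poly" schedule,
    #   lr(i) = base_lr * (1 - i / total_iteration) ** lr_power,
    # e.g. with base_lr=1e-2 and lr_power=0.9 the lr is ~5.5e-4 at 96%
    # of training and decays toward 0 by the end of training.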
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    if engine.distributed:
        model = DistributedDataParallel(model)

    engine.register_state(dataloader=train_loader, model=model,
                          optimizer=optimizer)
    if engine.continue_state_object:
        engine.restore_checkpoint()

    model.train()
    for epoch in range(engine.state.epoch, config.nepochs):
        if engine.distributed:
            train_sampler.set_epoch(epoch)
        bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
        pbar = tqdm(range(config.niters_per_epoch), file=sys.stdout,
                    bar_format=bar_format)
        dataloader = iter(train_loader)

        for idx in pbar:
            optimizer.zero_grad()
            engine.update_iteration(epoch, idx)

            # iterator.next() is Python 2 only; use the builtin next()
            minibatch = next(dataloader)
            imgs = minibatch['data']
            gts = minibatch['label']

            imgs = imgs.cuda(non_blocking=True)
            gts = gts.cuda(non_blocking=True)

            loss = model(imgs, gts)
            # average the loss across GPUs for logging only; apex DDP
            # already synchronizes the gradients themselves
            if engine.distributed:
                reduce_loss = all_reduce_tensor(loss,
                                                world_size=engine.world_size)
            else:
                # single-GPU fallback so reduce_loss is always defined
                reduce_loss = loss

            current_idx = epoch * config.niters_per_epoch + idx
            lr = lr_policy.get_lr(current_idx)

            # groups 0-1: pretrained context path at 1x lr;
            # remaining groups: newly initialized modules at 10x lr
            for i in range(2):
                optimizer.param_groups[i]['lr'] = lr
            for i in range(2, len(optimizer.param_groups)):
                optimizer.param_groups[i]['lr'] = lr * 10

            loss.backward()
            optimizer.step()

            print_str = 'Epoch{}/{}'.format(epoch, config.nepochs) \
                        + ' Iter{}/{}:'.format(idx + 1, config.niters_per_epoch) \
                        + ' lr=%.2e' % lr \
                        + ' loss=%.2f' % reduce_loss.item()
            pbar.set_description(print_str, refresh=False)
        # snapshot every snapshot_iter epochs and on each of the last
        # 20 epochs; in distributed mode only rank 0 writes
        if (epoch > config.nepochs - 20) or (epoch % config.snapshot_iter == 0):
            if engine.distributed and (engine.local_rank == 0):
                engine.save_and_link_checkpoint(config.snapshot_dir,
                                                config.log_dir,
                                                config.log_dir_link)
            elif not engine.distributed:
                engine.save_and_link_checkpoint(config.snapshot_dir,
                                                config.log_dir,
                                                config.log_dir_link)
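
# Typical invocation, assuming Engine handles the --local_rank argument
# that torch.distributed.launch passes (not verified against this repo):
#   python -m torch.distributed.launch --nproc_per_node=4 train.py
# Single GPU: python train.py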