-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtrain_disparity.py
executable file
·131 lines (103 loc) · 3.99 KB
/
train_disparity.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 21 15:13:48 2017
@author: li
please use "npy_save_database_5views.py" to produce training samples in ./npy_all_four_closest_corners/
"""
from __future__ import print_function
import sys
sys.path.insert(0,'./models')
sys.path.insert(0,'./custom')
import argparse
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import Custom_data_disparity as cd
from torchsample import transforms as tensor_tf
import glob
from custom_loss_r_weight_TH_5views import CustomLoss
from pure_conv import Disparity_Net
# Directory where trained model parameters (.pkl) are stored/loaded.
model_dir = './model_para/'

# Command-line options for the disparity-network training run.
parser = argparse.ArgumentParser()
parser.add_argument('--imageSize', type=int, default=64)   # crop size used by SpecialCrop below
parser.add_argument('--train_size', type=int, default=8)   # mini-batch size
parser.add_argument('--workers', type=int, default=8)      # DataLoader worker processes
parser.add_argument('--input_ch', type=int, default=5)     # network input channels (5 light-field views)
parser.add_argument('--output_ch', type=int, default=1)    # network output channels (single disparity map)
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
# FIX: help text previously said "default=0.0001" but the actual default is 0.00002.
parser.add_argument('--lr', type=float, default=0.00002, help='learning rate, default=0.00002')
parser.add_argument('--Model_pkl', default=model_dir + 'Model_Disparity.pkl')
parser.add_argument('--train_label_dir', default='./npy_all_four_closest_corners/') #please use "npy_save_my_database_5views.py" to produce training samples
opt = parser.parse_args()
print(opt)

# Let cuDNN benchmark convolution algorithms (fixed input sizes here, so this helps).
cudnn.benchmark = True
# CUDA is unconditionally enabled; the script assumes a GPU is available.
opt.cuda = True
def list_images1(folder, pattern='/*_sub_imgs', ext='npy'):
    """Return the sorted paths of files matching folder + pattern + '.' + ext.

    With the defaults this lists every '*_sub_imgs.npy' sample file
    directly under *folder* (the pattern carries its own leading '/').
    """
    search_expr = '{0}{1}.{2}'.format(folder, pattern, ext)
    return sorted(glob.glob(search_expr))
def list_images(folder, pattern='*'):
    """Return the sorted paths of everything matching folder + pattern.

    Note: *folder* and *pattern* are concatenated verbatim, so the caller
    must include any trailing '/' on *folder* (or leading '/' in *pattern*).
    """
    return sorted(glob.glob(folder + pattern))
# Collect the pre-generated '*_sub_imgs.npy' training samples.
train_label_feature_list=list_images1(opt.train_label_dir)
## data augmentation
# Two independent random-flip pipelines (torchsample); presumably applied to
# different tensors of each sample inside CustomDataset — TODO confirm against
# Custom_data_disparity.
data_transform1 = tensor_tf.Compose([
        tensor_tf.RandomFlip(),
    ])
data_transform2 = tensor_tf.Compose([
        tensor_tf.RandomFlip(),
    ])
# Random affine augmentation (rotation/shear/zoom).
# NOTE(review): built but never passed to CustomDataset below — appears unused.
affine_transform=tensor_tf.AffineCompose([
        tensor_tf.RandomAffine(
    rotation_range=10,
    translation_range=None,
    shear_range=10,
    zoom_range=(0.9,1.1),
    interp='bilinear',
    lazy=False
    )
    ]
    )
# Right-angle rotations only (0/90/180/270) — these ARE handed to the dataset.
affine_transform1=tensor_tf.RandomChoiceRotate([0,90,180,270])
affine_transform2=tensor_tf.RandomChoiceRotate([0,90,180,270])
train_set=cd.CustomDataset(train_label_feature_list,
                           data_transform1=data_transform1,
                           data_transform2=data_transform2,
                           affine_transform1=affine_transform1,
                           affine_transform2=affine_transform2
                           )
# Fixed-position crop to imageSize x imageSize.
# NOTE(review): RdSpCrop is never referenced again in this file — verify whether
# cropping is meant to happen inside CustomDataset instead.
RdSpCrop = tensor_tf.RandomChoiceCompose([
    tensor_tf.SpecialCrop((opt.imageSize, opt.imageSize),0)
    ])
trainloader = torch.utils.data.DataLoader(train_set, batch_size=opt.train_size,
                                          shuffle=True, num_workers=opt.workers)
# Disparity estimation network; uncomment the next line to resume from a checkpoint.
Model = Disparity_Net(opt)
# Model.load_state_dict(torch.load(opt.Model_pkl))
# Gradient-emphasized loss (see custom_loss_r_weight_TH_5views).
criterion = CustomLoss()
if opt.cuda:
    Model.cuda()
    criterion.cuda()
# setup optimizer
# NOTE(review): effective learning rate is opt.lr / 30 — confirm this extra
# division is intentional rather than a leftover from an earlier schedule.
optimizer = optim.Adam(Model.parameters(), lr=opt.lr/30, betas=(0.9, 0.999))
# Main training loop. Runs effectively forever (100000 epochs); no checkpoint
# saving is visible in this chunk — presumably handled elsewhere or added later.
for epoch in range(0, 100000):
    Model.train()
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # Each sample yields the stacked light-field views (LF), a gradient
        # magnitude weight map (mag), and two extra tensors (LF1, LF2).
        LF, mag, LF1, LF2 = data
        # FIX: LF1/LF2 were previously wrapped in Variable(...cuda()) as well,
        # but they are never used below — skip those host->device copies.
        # (Removed unused local `batch_size` for the same reason.)
        mag = Variable(mag.cuda())
        LF = Variable(LF.cuda())
        optimizer.zero_grad()
        disparity = Model(LF)
        loss = criterion(LF, disparity, mag)  # gradient emphasized loss function
        loss.backward()
        optimizer.step()
        running_loss += loss.data[0]  # pre-0.4 PyTorch scalar extraction (matches Variable usage)
        if i % 50 == 0 and i != 0:
            # NOTE(review): the very first window accumulates 51 batches
            # (i = 0..50) but still divides by 50; left as-is to keep the
            # logged values identical, but worth confirming intent.
            print('[%d, %5d] train loss: %.5f' % (epoch, i, 2000*running_loss/50.))
            running_loss = 0.0