forked from dmlc/cxxnet
-
Notifications
You must be signed in to change notification settings - Fork 1
/
bowl.conf
113 lines (99 loc) · 1.91 KB
/
bowl.conf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
# Configuration for ImageNet
# NOTE(review): the filename (bowl.conf) and the 121-way softmax at the end of
# netconfig suggest this is actually for the National Data Science Bowl
# (121 plankton classes), not ImageNet — confirm before reusing the header.
# Acknowledgement:
# Ref: http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf
# The scheduling parameters are adapted from Caffe (http://caffe.berkeleyvision.org/)
# Device for training.
# NOTE(review): this is shadowed by "dev = gpu:1" near the end of the file;
# verify which assignment cxxnet honors and delete the dead one.
dev = cpu
# ---- training data pipeline ----
# Repeated "iter = ..." keys are intentional cxxnet syntax: they chain
# iterators (imgbin -> threadbuffer) and "iter = end" closes the chain.
data = train
iter = imgbin
# image list file and packed image binary for the training set
image_list = "./tr.lst"
image_bin = "./tr.bin"
# mean-image file shared with the validation pipeline below
image_mean = "models/image_mean.bin"
# data augmentation (training only): random mirror and random crop enabled
rand_mirror=1
rand_crop=1
# rotation up to +/-180 degrees; aspect/shear jitter ratios
max_rotate_angle=180
max_aspect_ratio = 0.5
max_shear_ratio = 0.3
# random crop size range in pixels (network input is 40x40, see input_shape)
min_crop_size=32
max_crop_size=48
# prefetch batches on a background thread
iter = threadbuffer
iter = end
# ---- validation data pipeline (same structure, no augmentation keys) ----
eval = val
iter = imgbin
image_list = "./va.lst"
image_bin = "./va.bin"
image_mean = "models/image_mean.bin"
iter = threadbuffer
iter = end
# ---- network architecture ----
# "layer[+1]" advances to a new node; "layer[+0]" presumably applies the layer
# in place on the previous node (dropout/softmax) — confirm with cxxnet docs.
netconfig=start
# conv1: 48 channels, 4x4 kernel, stride 1, pad 2, followed by ReLU
layer[+1] = conv
kernel_size = 4
stride = 1
nchannel = 48
pad = 2
layer[+1] = relu
# pool1: 3x3 max pooling, stride 2
layer[+1] = max_pooling
kernel_size = 3
stride = 2
###############
# conv2 + conv3: two 96-channel 3x3 convolutions (pad 1), each with ReLU
layer[+1] = conv
nchannel = 96
kernel_size = 3
stride = 1
pad = 1
layer[+1] = relu
layer[+1] = conv
nchannel = 96
kernel_size = 3
stride = 1
pad = 1
layer[+1] = relu
# pool2: 3x3 max pooling, stride 2
layer[+1] = max_pooling
kernel_size = 3
stride = 2
##############
# conv4: 128 channels, 2x2 kernel, with ReLU
layer[+1] = conv
nchannel = 128
kernel_size = 2
stride = 1
layer[+1] = relu
# conv5: 128 channels, 3x3 kernel
# NOTE(review): no ReLU after this conv, unlike every other conv in the
# stack — confirm whether that is intentional.
layer[+1] = conv
nchannel = 128
kernel_size = 3
stride = 1
# pool3: 3x3 max pooling, stride 2
layer[+1] = max_pooling
kernel_size = 3
stride = 2
# classifier head: flatten -> fc(256) -> dropout(0.5) -> fc(121) -> softmax
layer[+1] = flatten
layer[+1] = fullc
nhidden = 256
layer[+0] = dropout
threshold = 0.5
layer[+1] = fullc
# 121 output classes
nhidden = 121
layer[+0] = softmax
netconfig=end
# ---- global training parameters ----
# evaluation metric
metric = error
# NOTE(review): duplicates "dev = cpu" from the top of the file; presumably
# this later gpu:1 assignment wins — verify and remove the dead key.
dev = gpu:1
max_round = 100
num_round = 100
# input shape not including batch (channels,height,width)
input_shape = 3,40,40
batch_size = 64
# global parameters in any section outside netconfig, and iter
momentum = 0.9
# per-parameter learning rate / weight decay: "wmat" = weights, "bias" = biases
wmat:lr = 0.001
wmat:wd = 0.0005
bias:wd = 0.000
bias:lr = 0.002
# all the learning rate schedule settings start with "lr"
# exponential decay: multiply lr by gamma every "step" updates
lr:schedule = expdecay
lr:gamma = 0.1
lr:step = 20000
# checkpointing: save model snapshots into model_dir
save_model=1
model_dir=models
print_step = 1
# weight initialization
# NOTE(review): init_sigma normally applies to gaussian init and is
# presumably ignored when random_type = xavier — confirm, then drop it.
random_type = xavier
init_sigma = 0.01