# proto-file: deeplab2/config.proto
# proto-message: ExperimentOptions
#
# Panoptic-DeepLab with ResNet-50 and output stride 32.
#
############### PLEASE READ THIS BEFORE USING THIS CONFIG ###############
# Before using this config, you need to update the following fields:
# - experiment_name: Use a unique experiment name for each experiment.
# - initial_checkpoint: Update the path to the initial checkpoint.
# - train_dataset_options.file_pattern: Update the path to the
# training set, e.g., your_dataset/train*.tfrecord
# - eval_dataset_options.file_pattern: Update the path to the
# validation set, e.g., your_dataset/eval*.tfrecord
#########################################################################
#
# This config provides an example of training Panoptic-DeepLab with ONLY
# semantic segmentation (i.e., the instance/panoptic segmentation is not
# trained). This is useful for datasets that provide only semantic
# segmentation annotations.
#
# For ResNet, see
# - Kaiming He, et al. "Deep Residual Learning for Image Recognition."
# In CVPR, 2016.
# For Panoptic-DeepLab, see
# - Bowen Cheng, et al. "Panoptic-DeepLab: A Simple, Strong, and Fast Baseline
# for Bottom-Up Panoptic Segmentation." In CVPR, 2020.
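#
# As a rough usage sketch (assuming the standard deeplab2 entry point,
# trainer/train.py, and its documented flags --config_file, --mode,
# --model_dir and --num_gpus), a filled-in copy of this config is typically
# launched with something like:
#   python trainer/train.py \
#     --config_file=resnet50_os32_semseg.textproto \
#     --mode=train \
#     --model_dir=${MODEL_DIR} \
#     --num_gpus=1
# Here ${MODEL_DIR} is a placeholder for your experiment directory.
#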
# Use a unique experiment_name for each experiment.
experiment_name: "${EXPERIMENT_NAME}"
model_options {
# Update the path to the initial checkpoint (e.g., ImageNet
# pretrained checkpoint).
initial_checkpoint: "${INIT_CHECKPOINT}"
backbone {
name: "resnet50"
output_stride: 32
}
decoder {
feature_key: "res5"
decoder_channels: 256
aspp_channels: 256
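# ASPP atrous rates; these are halved relative to the (6, 12, 18) commonly
# used at output stride 16, to match the output stride of 32.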
atrous_rates: 3
atrous_rates: 6
atrous_rates: 9
}
panoptic_deeplab {
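# Low-level skip connections for the Panoptic-DeepLab decoder: res3
# (1/8 of the input resolution) projected to 64 channels, and res2
# (1/4 of the input resolution) projected to 32 channels.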
low_level {
feature_key: "res3"
channels_project: 64
}
low_level {
feature_key: "res2"
channels_project: 32
}
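# Disable the instance branch (center and offset predictions), so only the
# semantic segmentation head is trained.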
instance {
enable: false
}
semantic_head {
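# 19 output channels, one per Cityscapes semantic class.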
output_channels: 19
head_channels: 256
}
}
}
trainer_options {
save_checkpoints_steps: 1000
save_summaries_steps: 100
steps_per_loop: 100
loss_options {
semantic_loss {
name: "softmax_cross_entropy"
weight: 1.0
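# Hard-pixel mining: only the top 20% of pixels, ranked by loss value,
# contribute to the semantic loss.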
top_k_percent: 0.2
}
}
solver_options {
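# Note: if you change the training batch_size below, the learning rate and
# number of steps may need to be retuned.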
base_learning_rate: 0.0005
training_number_of_steps: 60000
}
}
train_dataset_options {
dataset: "cityscapes_panoptic"
# Update the path to the training set.
file_pattern: "${TRAIN_SET}"
# Adjust the batch_size to fit your GPU/TPU memory.
# Also see Q1 in g3doc/faq.md.
batch_size: 8
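# Crop size is (height, width) = (1025, 2049), i.e., full-resolution
# Cityscapes images (1024 x 2048) plus one pixel, following the DeepLab
# crop-size convention.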
crop_size: 1025
crop_size: 2049
# Skip resizing.
min_resize_value: 0
max_resize_value: 0
augmentations {
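# Random scale augmentation: scale factors are sampled from [0.5, 2.0] in
# steps of 0.1.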
min_scale_factor: 0.5
max_scale_factor: 2.0
scale_factor_step_size: 0.1
}
}
eval_dataset_options {
dataset: "cityscapes_panoptic"
# Update the path to the validation set.
file_pattern: "${VAL_SET}"
batch_size: 1
crop_size: 1025
crop_size: 2049
# Skip resizing.
min_resize_value: 0
max_resize_value: 0
}
evaluator_options {
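# Wait up to 43200 seconds (12 hours) for new checkpoints before the
# continuous evaluation loop exits.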
continuous_eval_timeout: 43200
save_predictions: true
save_raw_predictions: false
}