-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy patharguments.py
306 lines (239 loc) · 10.4 KB
/
arguments.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# Presumably the number of images in the KITTI training split used for
# adaptation — TODO confirm against the dataset loader.
DEFAULT_IMAGES_KITTI = 57874
# Default batch sizes for the adaptation and segmentation loaders.
DEFAULT_ADAPTATION_BATCH_SIZE = 6
DEFAULT_SEG_BATCH_SIZE = 6
# One epoch is one full pass over the KITTI images at the default
# adaptation batch size (integer division drops the partial batch).
DEFAULT_BATCHES_PER_EPOCH = DEFAULT_IMAGES_KITTI // DEFAULT_ADAPTATION_BATCH_SIZE
class ArgumentsBase(object):
    """Base class assembling the UBNA command line interface.

    Subclasses compose a parser by calling a subset of the
    ``_harness_init_*`` / ``_training_init_*`` helpers from their
    ``__init__`` and then expose a public ``parse()`` method that wraps
    :meth:`_parse`.
    """

    DESCRIPTION = 'UBNA Arguments'

    def __init__(self):
        # ArgumentDefaultsHelpFormatter makes --help display each
        # option's default value automatically.
        self.ap = ArgumentParser(
            description=self.DESCRIPTION,
            formatter_class=ArgumentDefaultsHelpFormatter
        )

    def _harness_init_system(self):
        """Register system-level options (device, workers, determinism)."""
        self.ap.add_argument(
            '--sys-cpu', default=False, action='store_true',
            help='Disable GPU acceleration'
        )
        self.ap.add_argument(
            '--sys-num-workers', type=int, default=3,
            help='Number of worker processes to spawn per DataLoader'
        )
        self.ap.add_argument(
            '--sys-best-effort-determinism', default=False, action='store_true',
            help='Try and make some parts of the training/validation deterministic'
        )

    def _harness_init_model(self):
        """Register model architecture / checkpoint loading options."""
        self.ap.add_argument(
            '--model-type', type=str, default='resnet', choices=('resnet', 'vgg'),
            help='Type of model, which determines the network architecture'
        )
        self.ap.add_argument(
            '--model-num-layers', type=int, default=18, choices=(18, 34, 50, 101, 152),
            help='Number of ResNet Layers in the adaptation and segmentation encoder'
        )
        self.ap.add_argument(
            '--model-num-layers-vgg', type=int, default=16, choices=(11, 13, 16, 19),
            help='Number of VGG Layers in the adaptation and segmentation encoder'
        )
        self.ap.add_argument(
            '--experiment-class', type=str, default='iccv_experiments',
            help='Folder containing the current experiment series'
        )
        self.ap.add_argument(
            '--model-name', type=str, default='ubna',
            help='A nickname for this model'
        )
        self.ap.add_argument(
            '--model-load', type=str, default=None,
            help='Load a model state from a state directory containing *.pth files'
        )
        self.ap.add_argument(
            '--model-disable-lr-loading', default=False, action='store_true',
            help='Do not load the training state but only the model weights'
        )

    def _harness_init_segmentation(self):
        """Register segmentation validation/evaluation options."""
        self.ap.add_argument(
            '--segmentation-validation-resize-height', type=int, default=512,
            help='Segmentation images are resized to this height prior to cropping'
        )
        self.ap.add_argument(
            '--segmentation-validation-resize-width', type=int, default=1024,
            help='Segmentation images are resized to this width prior to cropping'
        )
        self.ap.add_argument(
            '--segmentation-validation-loaders', type=str, default='cityscapes_validation',
            help='Comma separated list of segmentation dataset loaders from loaders/segmentation/validation.py to '
                 'use for validation'
        )
        self.ap.add_argument(
            '--segmentation-validation-batch-size', type=int, default=1,
            help='Batch size for segmentation validation'
        )
        self.ap.add_argument(
            '--segmentation-eval-num-images', type=int, default=20,
            help='Number of generated images to store to disk during evaluation'
        )
        self.ap.add_argument(
            '--segmentation-eval-remaps', type=str, default='none',
            help='Segmentation label remap modes for reduced number of classes, can be "none" (19 classes), '
                 '"synthia_16" (16 classes) or "synthia_13" (13 classes)'
        )

    def _training_init_train(self):
        """Register generic training-loop options (schedule, logging, init)."""
        self.ap.add_argument(
            '--train-batches-per-epoch', type=int, default=DEFAULT_BATCHES_PER_EPOCH,
            help='Number of batches we consider in an epoch'
        )
        self.ap.add_argument(
            '--train-num-epochs', type=int, default=20,
            help='Number of epochs to train for'
        )
        self.ap.add_argument(
            '--train-checkpoint-frequency', type=int, default=5,
            help='Number of epochs between model checkpoint dumps'
        )
        self.ap.add_argument(
            '--train-tb-frequency', type=int, default=500,
            help='Number of steps between each info dump to tensorboard'
        )
        self.ap.add_argument(
            '--train-print-frequency', type=int, default=2500,
            help='Number of steps between each info dump to stdout'
        )
        self.ap.add_argument(
            '--train-learning-rate', type=float, default=1e-4,
            help='Initial learning rate to train with',
        )
        self.ap.add_argument(
            '--train-scheduler-step-size', type=int, default=15,
            help='Number of epochs between learning rate reductions',
        )
        self.ap.add_argument(
            '--train-weight-decay', type=float, default=0.0,
            help='Weight decay to train with',
        )
        self.ap.add_argument(
            '--train-weights-init', type=str, default='pretrained', choices=('pretrained', 'scratch'),
            help='Initialize the encoder networks with Imagenet pretrained weights or start from scratch'
        )

    def _training_init_adaptation(self):
        """Register options specific to the adaptation phase."""
        self.ap.add_argument(
            '--adaptation-training-loaders', type=str, default='kitti_kitti_train',
            help='Comma separated list of adaptation dataset loaders from loaders/adaptation/train.py to use '
                 'for training'
        )
        self.ap.add_argument(
            '--adaptation-training-batch-size', type=int, default=DEFAULT_ADAPTATION_BATCH_SIZE,
            help='Batch size for adaptation training'
        )
        # BUGFIX: the two help-string literals were concatenated with no
        # separating space ("training.Mind ...") and claimed the default
        # was 0 although the actual default is 50.
        self.ap.add_argument(
            '--adaptation-num-batches', type=int, default=50,
            help='Trains the model for only this number of batches and then stops the whole training. '
                 'Mind that a value of 0 means that this option will not be used'
        )
        self.ap.add_argument(
            '--adaptation-xlsx-frequency', type=int, default=1,
            help='Number of steps between each info dump to xlsx file'
        )
        self.ap.add_argument(
            '--adaptation-resize-height', type=int, default=192,
            help='Adaptation images are resized to this height'
        )
        self.ap.add_argument(
            '--adaptation-resize-width', type=int, default=640,
            help='Adaptation images are resized to this width'
        )
        self.ap.add_argument(
            '--adaptation-crop-height', type=int, default=192,
            help='Adaptation images are cropped to this height'
        )
        self.ap.add_argument(
            '--adaptation-crop-width', type=int, default=640,
            help='Adaptation images are cropped to this width'
        )
        # This option previously had no help text at all.
        self.ap.add_argument(
            '--adaptation-mode-sequential', type=str, default='none',
            choices=('none', 'batch_shrinking', 'layer_shrinking'),
            help='Momentum shrinking mode applied to the BatchNorm layers during adaptation'
        )
        self.ap.add_argument(
            '--adaptation-alpha-batch', type=float, default=0.1,
            help='Determine how fast momentum is shrinking depending on the number of batches trained'
        )
        self.ap.add_argument(
            '--adaptation-alpha-layer', type=float, default=0.1,
            help='Determines how fast momentum is shrinking depending on the layer depth of the BN Layer within '
                 'the architecture'
        )
        self.ap.add_argument(
            '--adaptation-batchnorm-momentum', type=float, default=0.1,
            help='Momentum for the BatchNorm layer of the shared encoder'
        )

    def _training_init_segmentation(self):
        """Register options specific to segmentation training."""
        self.ap.add_argument(
            '--segmentation-training-loaders', type=str, default='cityscapes_train',
            help='Comma separated list of segmentation dataset loaders from loaders/segmentation/train.py to use '
                 'for training'
        )
        self.ap.add_argument(
            '--segmentation-training-batch-size', type=int, default=DEFAULT_SEG_BATCH_SIZE,
            help='Batch size for segmentation training'
        )
        self.ap.add_argument(
            '--segmentation-resize-height', type=int, default=512,
            help='Segmentation images are resized to this height prior to cropping'
        )
        self.ap.add_argument(
            '--segmentation-resize-width', type=int, default=1024,
            help='Segmentation images are resized to this width prior to cropping'
        )
        self.ap.add_argument(
            '--segmentation-crop-height', type=int, default=192,
            help='Segmentation images are cropped to this height'
        )
        self.ap.add_argument(
            '--segmentation-crop-width', type=int, default=640,
            help='Segmentation images are cropped to this width'
        )

    def _parse(self):
        """Parse ``sys.argv`` with the composed parser and return the namespace."""
        return self.ap.parse_args()
class TrainingArguments(ArgumentsBase):
    """Command line arguments for UBNA training runs."""

    DESCRIPTION = 'UBNA training arguments'

    def __init__(self):
        super().__init__()
        # Compose the parser from the option groups relevant to training.
        for register in (
                self._harness_init_system,
                self._harness_init_model,
                self._harness_init_segmentation,
                self._training_init_train,
                self._training_init_segmentation,
        ):
            register()

    def parse(self):
        """Parse the command line and return the options namespace."""
        return self._parse()
class AdaptationArguments(ArgumentsBase):
    """Command line arguments for UBNA adaptation runs."""

    # BUGFIX: was 'UBNA training arguments', duplicating the description
    # of TrainingArguments — an apparent copy-paste slip.
    DESCRIPTION = 'UBNA adaptation arguments'

    def __init__(self):
        super().__init__()
        # Compose the parser from the option groups relevant to adaptation.
        self._harness_init_system()
        self._harness_init_model()
        self._harness_init_segmentation()
        self._training_init_train()
        self._training_init_adaptation()

    def parse(self):
        """Parse the command line and return the options namespace."""
        opt = self._parse()
        return opt
class SegmentationEvaluationArguments(ArgumentsBase):
    """Command line arguments for UBNA segmentation evaluation."""

    DESCRIPTION = 'UBNA Segmentation Evaluation'

    def __init__(self):
        super().__init__()
        # Evaluation only needs the harness option groups, no training ones.
        self._harness_init_system()
        self._harness_init_model()
        self._harness_init_segmentation()

    def parse(self):
        """Parse the command line, filling in dummy training options.

        The StateManager requires these training options to exist, but
        they are effectively ignored when evaluating, so they can be
        initialized to arbitrary values.
        """
        opt = self._parse()
        placeholders = {
            'train_learning_rate': 0,
            'train_scheduler_step_size': 1000,
            'train_weight_decay': 0,
            'train_weights_init': 'scratch',
        }
        for attr, value in placeholders.items():
            setattr(opt, attr, value)
        return opt