# train_gd.py

import sys
import time

from absl import app
from absl import flags
import gin
import tensorflow as tf

from uflow import dis_net
from uflow import uflow_data
from uflow import uflow_flags
from uflow import uflow_main
from uflow import uflow_plotting

FLAGS = flags.FLAGS
def create_discriminator(learning_rate):
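  """Builds the discriminator network, sharing the flow model's learning rate."""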
dis = dis_net.Dis_model(FLAGS.checkpoint_dir_dis, learning_rate=learning_rate)
return dis
def main(unused_argv):
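  """Trains UFlow jointly with a discriminator, checkpointing the best models."""
  # Best endpoint error (EPE) seen so far on each benchmark; initialized high
  # so the first evaluation always saves a checkpoint.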
kitti_EPE_1 = 100.0 # kitti-2015
sintel_EPE_1 = 100.0 # sintel-clean
sintel_EPE_2 = 100.0 # sintel-final
chairs_EPE = 100.0
  num = 5  # Unused here; shadowed by the training-loop variable of the same name.
  # Optionally cap GPU memory usage, e.g.:
  # gpus = tf.config.experimental.list_physical_devices('GPU')
  # tf.config.experimental.set_virtual_device_configuration(
  #     gpus[0],
  #     [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=12000)])
if FLAGS.no_tf_function:
tf.config.experimental_run_functions_eagerly(True)
print('TFFUNCTION DISABLED')
gin.parse_config_files_and_bindings(FLAGS.config_file, FLAGS.gin_bindings)
# Make directories if they do not exist yet.
if FLAGS.checkpoint_dir and not tf.io.gfile.exists(FLAGS.checkpoint_dir):
print('Making new checkpoint directory', FLAGS.checkpoint_dir)
tf.io.gfile.makedirs(FLAGS.checkpoint_dir)
if FLAGS.checkpoint_dir1 and not tf.io.gfile.exists(FLAGS.checkpoint_dir1):
print('Making new checkpoint directory', FLAGS.checkpoint_dir1)
tf.io.gfile.makedirs(FLAGS.checkpoint_dir1)
if FLAGS.checkpoint_dir2 and not tf.io.gfile.exists(FLAGS.checkpoint_dir2):
print('Making new checkpoint directory', FLAGS.checkpoint_dir2)
tf.io.gfile.makedirs(FLAGS.checkpoint_dir2)
if FLAGS.checkpoint_dir_dis and not tf.io.gfile.exists(FLAGS.checkpoint_dir_dis):
print('Making new checkpoint directory', FLAGS.checkpoint_dir_dis)
tf.io.gfile.makedirs(FLAGS.checkpoint_dir_dis)
if FLAGS.plot_dir and not tf.io.gfile.exists(FLAGS.plot_dir):
print('Making new plot directory', FLAGS.plot_dir)
tf.io.gfile.makedirs(FLAGS.plot_dir)
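
  # Build the flow model and the discriminator used for adversarial training.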
uflow = uflow_main.create_uflow()
dis = create_discriminator(uflow._learning_rate)
if not FLAGS.from_scratch:
# First restore from init_checkpoint_dir, which is only restored from but
# not saved to, and then restore from checkpoint_dir if there is already
# a model there (e.g. if the run was stopped and restarted).
if FLAGS.init_checkpoint_dir:
print('Initializing model from checkpoint {}.'.format(
FLAGS.init_checkpoint_dir))
uflow.update_checkpoint_dir(FLAGS.init_checkpoint_dir)
uflow.restore(
reset_optimizer=FLAGS.reset_optimizer,
reset_global_step=FLAGS.reset_global_step)
uflow.update_checkpoint_dir(FLAGS.checkpoint_dir)
elif FLAGS.checkpoint_dir:
print('Restoring model from checkpoint {}.'.format(FLAGS.checkpoint_dir))
uflow.restore()
else:
print('Starting from scratch.')
if FLAGS.updata_dis:
    print('Restoring discriminator model from checkpoint {}.'.format(FLAGS.checkpoint_dir_dis))
dis.restore(
reset_optimizer=FLAGS.reset_optimizer,
reset_global_step=FLAGS.reset_global_step)
if FLAGS.eval_on:
print('Making eval datasets and eval functions.')
evaluate, _ = uflow_data.make_eval_function(
FLAGS.eval_on,
FLAGS.height,
FLAGS.width,
progress_bar=True,
plot_dir=FLAGS.plot_dir,
num_plots=50)
if FLAGS.train_on:
# Build training iterator.
print('Making training iterator.')
train_it = uflow_data.make_train_iterator(
FLAGS.train_on,
FLAGS.height,
FLAGS.width,
FLAGS.shuffle_buffer_size,
FLAGS.batch_size,
FLAGS.seq_len,
crop_instead_of_resize=FLAGS.crop_instead_of_resize,
apply_augmentation=True,
include_ground_truth=FLAGS.use_supervision,
resize_gt_flow=FLAGS.resize_gt_flow_supervision,
include_occlusions=FLAGS.use_gt_occlusions
)
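    # Choose the loss weights: pure supervision when ground truth is used,
    # otherwise a weighted combination of unsupervised losses.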
if FLAGS.use_supervision:
weights = {'supervision': 1.}
else:
weights = {
'photo': FLAGS.weight_photo,
'ssim': FLAGS.weight_ssim,
'census': FLAGS.weight_census,
'smooth1': FLAGS.weight_smooth1,
'smooth2': FLAGS.weight_smooth2,
'edge_constant': FLAGS.smoothness_edge_constant,
}
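      # Drop loss terms whose weight is (near) zero; 'edge_constant' is kept
      # because it parameterizes edge-aware smoothness rather than a loss.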
weights = {
k: v for (k, v) in weights.items() if v > 1e-7 or k == 'edge_constant'
}
def weight_selfsup_fn():
step = tf.compat.v1.train.get_or_create_global_step(
) % FLAGS.selfsup_step_cycle
# Start self-supervision only after a certain number of steps.
# Linearly increase self-supervision weight for a number of steps.
ramp_up_factor = tf.clip_by_value(
float(step - (FLAGS.selfsup_after_num_steps - 1)) /
float(max(FLAGS.selfsup_ramp_up_steps, 1)), 0., 1.)
return FLAGS.weight_selfsup * ramp_up_factor
distance_metrics = {
'photo': FLAGS.distance_photo,
'census': FLAGS.distance_census,
}
print('Starting training loop.')
log = dict()
epoch = 0
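    # Frozen teacher models for self-supervised distillation; created lazily
    # once self-supervision becomes active.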
teacher_feature_model = None
teacher_flow_model = None
test_frozen_flow = None
while True:
current_step = tf.compat.v1.train.get_or_create_global_step().numpy()
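      # Determine which occlusion-estimation variants are active at this step;
      # most switch on only after a configured number of warm-up steps.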
occ_active = {
'uflow':
FLAGS.occlusion_estimation == 'uflow',
'brox':
current_step > FLAGS.occ_after_num_steps_brox,
'wang':
current_step > FLAGS.occ_after_num_steps_wang,
'wang4':
current_step > FLAGS.occ_after_num_steps_wang,
'wangthres':
current_step > FLAGS.occ_after_num_steps_wang,
'wang4thres':
current_step > FLAGS.occ_after_num_steps_wang,
'fb_abs':
current_step > FLAGS.occ_after_num_steps_fb_abs,
'forward_collision':
current_step > FLAGS.occ_after_num_steps_forward_collision,
'backward_zero':
current_step > FLAGS.occ_after_num_steps_backward_zero,
}
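      # Work on a copy so per-epoch weight changes do not leak across epochs.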
current_weights = {k: v for k, v in weights.items()}
# Prepare self-supervision if it will be used in the next epoch.
if FLAGS.weight_selfsup > 1e-7 and (
current_step % FLAGS.selfsup_step_cycle
) + FLAGS.epoch_length > FLAGS.selfsup_after_num_steps:
# Add selfsup weight with a ramp-up schedule. This will cause a
# recompilation of the training graph defined in uflow.train(...).
current_weights['selfsup'] = weight_selfsup_fn
# Freeze model for teacher distillation.
if teacher_feature_model is None and FLAGS.frozen_teacher:
# Create a copy of the existing models and freeze them as a teacher.
# Tell uflow about the new, frozen teacher model.
teacher_feature_model, teacher_flow_model = uflow_main.create_frozen_teacher_models(
uflow)
uflow.set_teacher_models(
teacher_feature_model=teacher_feature_model,
teacher_flow_model=teacher_flow_model)
test_frozen_flow = uflow_main.check_model_frozen(
teacher_feature_model, teacher_flow_model, prev_flow_output=None)
# Check that the model actually is frozen.
if FLAGS.frozen_teacher and test_frozen_flow is not None:
uflow_main.check_model_frozen(
teacher_feature_model,
teacher_flow_model,
prev_flow_output=test_frozen_flow)
      # Train for one epoch.
num_steps = FLAGS.epoch_length
log1 = dict()
# Support constant lr values and callables (for learning rate schedules).
if callable(uflow._learning_rate):
lr = uflow._learning_rate()
else:
lr = uflow._learning_rate
start_time_data = time.time()
losses = {}
for num, batch in zip(range(num_steps), train_it):
stop_time_data = time.time()
sys.stdout.write('.')
sys.stdout.flush()
images, labels = batch
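        # Infer flow and occlusion forward and, with the frame order reversed,
        # backward; both directions feed the discriminator below.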
flow1, occlusion1 = uflow.batch_infer_no_tf_function(images, infer_occlusion=True)
images2 = tf.stack([images[:, 1], images[:, 0]], axis=1)
flow2, occlusion2 = uflow.batch_infer_no_tf_function(images2, infer_occlusion=True)
        # Optional visual debugging of flow and occlusion predictions:
        # dis.vision(images, flow1, flow2, occlusion1, occlusion2, FLAGS.plot_dir, num)
start_time_train_step = time.time()
        # Train the discriminator.
        d_loss, _ = dis.train_step(images, flow1, occlusion1, flow2, occlusion2)
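        # Update the flow network (the generator) against the discriminator.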
g_loss = uflow.train_step(images, dis, weights=current_weights,
distance_metrics=distance_metrics, occ_active=occ_active)
stop_time_train_step = time.time()
losses = g_loss
losses['d-loss'] = d_loss
log_update = losses
log_update['data-time'] = (stop_time_data - start_time_data) * 1000
log_update['train-time'] = (stop_time_train_step - start_time_train_step) * 1000
for key in log_update:
if key in log1:
log1[key].append(log_update[key])
else:
log1[key] = [log_update[key]]
start_time_data = time.time()
# uflow_plotting.print_step_log(log1, num)
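      # Average the per-step metrics accumulated over this epoch.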
for key in log1:
log1[key] = tf.reduce_mean(input_tensor=log1[key])
sys.stdout.write('\n')
sys.stdout.flush()
for key in log1:
if key in log:
log[key].append(log1[key])
else:
log[key] = [log1[key]]
if FLAGS.checkpoint_dir and not FLAGS.no_checkpointing:
uflow.save()
dis.save()
uflow_plotting.print_log(log, epoch, lr.numpy())
if FLAGS.eval_on and FLAGS.evaluate_during_train and epoch % 5 == 0:
# Evaluate
eval_results = evaluate(uflow)
uflow_plotting.print_eval(eval_results)
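        # Flatten the eval metrics into a sorted 'key: value, ' string; the
        # code below extracts EPE values from it by position.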
status = ''.join(
['{}: {:.6f}, '.format(key, eval_results[key]) for key in sorted(eval_results)])
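        # Save a separate best checkpoint whenever a benchmark's EPE improves.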
eval_on = FLAGS.eval_on
for format_and_path in eval_on.split(';'):
data_format, path = format_and_path.split(':')
if 'kitti' in data_format:
EPE_1 = float(status.split(',')[1].split(':')[1][1:])
if EPE_1 < kitti_EPE_1: # kitti-2015
uflow.save_1()
kitti_EPE_1 = EPE_1
elif 'sintel' in data_format:
EPE_1 = float(status.split(',')[0].split(':')[1][1:])
EPE_2 = float(status.split(',')[6].split(':')[1][1:])
if EPE_1 < sintel_EPE_1: # sintel-clean
uflow.save_1()
sintel_EPE_1 = EPE_1
            if EPE_2 < sintel_EPE_2:  # sintel-final
uflow.save_2()
sintel_EPE_2 = EPE_2
elif 'chairs' in data_format:
EPE = float(status[12:20])
if EPE < chairs_EPE:
uflow.save_1()
chairs_EPE = EPE
if current_step >= FLAGS.num_train_steps:
break
epoch += 1
if __name__ == '__main__':
app.run(main)