-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathTrainingManager.py
More file actions
576 lines (500 loc) · 31.9 KB
/
TrainingManager.py
File metadata and controls
576 lines (500 loc) · 31.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
# Class that manages the training loop. This class will also deal with logging
import NetworkManager
import time, copy, random # TODO change to numpy random
import BatchHandler
import numpy as np
import pandas as pd
import os
import tensorflow as tf
import sys
import signal
import ReportWriter
import MDN_clustering
import multiprocessing as mp
import utils_draw_graphs
import utils
class TrainingManager:
def __init__(self, cf_pool, test_pool, encoder_means, encoder_stddev, parameter_dict):
self.cf_pool = cf_pool
self.test_pool = test_pool
self.parameter_dict = parameter_dict
self.hyper_results_logfile = "hyper.csv"
self.encoder_means = encoder_means
self.encoder_stddev = encoder_stddev
self.sigint_caught = False
return
    def train_network(self,netManager,training_batch_handler,validation_batch_handler,hyper_search=False):
        """Run the main training loop until a stop condition fires.

        Repeatedly pulls minibatches from training_batch_handler and steps the
        network. Every half checkpoint interval it logs a no-train summary pass
        plus a quick validation pass; every full checkpoint it saves the model,
        appends metrics to an in-memory log, and evaluates the stop conditions
        (learning-rate floor, wall-clock cutoff, overfitting, step cutoff, or
        SIGINT). When a stop condition fires, the checkpoint with the best
        validation loss is reloaded and one extra `final_run` iteration is
        executed to produce the final scores.

        Args:
            netManager: NetworkManager wrapping the model being trained.
            training_batch_handler: BatchHandler supplying training minibatches.
            validation_batch_handler: BatchHandler used for validation passes.
            hyper_search: if True, skip per-checkpoint PNG graph logging
                (avoids exploding the log directory during a search).

        Returns:
            dict: a copy of self.parameter_dict augmented with the final
            training/validation metrics for this fold.
        """
        fold_time = time.time()
        current_step = 0
        # previous_losses is only referenced by the commented-out LR decay
        # block below; kept for parity with it.
        previous_losses = []
        previous_val_losses = []
        # Running averages accumulated over one checkpoint window.
        step_time, loss = 0.0, 0.0
        steps_per_checkpoint = self.parameter_dict['steps_per_checkpoint']
        print "Starting Network training for:"
        print str(self.parameter_dict)
        overfitting_steps = 0
        # final_run is flipped True after a stop condition so one last
        # full-evaluation iteration runs (training step skipped).
        final_run = False
        training_log_df = pd.DataFrame()
        # Define and register a SIGINT handler here so Ctrl-C requests a
        # graceful stop (finish evaluation / reporting) instead of killing the
        # process mid-step. The original handler is restored after the loop.
        original_sigint_handler = signal.getsignal(signal.SIGINT)
        def sigint_handler(signum, frame):
            print "TrainingManager caught SIGINT. Stopping training, writing report, and exiting."
            self.sigint_caught = True
        signal.signal(signal.SIGINT, sigint_handler)
        # Per-checkpoint-window metric accumulators.
        loss_a = []
        val_step_loss_a = []
        accuracy_a = []
        val_accuracy_a = []
        while True:
            #### TRAINING
            if not final_run:
                step_start_time = time.time()
                batch_frame = training_batch_handler.get_minibatch()
                # print "Time to get batch: " + str(time.time()-step_start_time)
                # Target selection depends on model type: one-hot destinations
                # for the classifier, future trajectory for the MDN. An unknown
                # model_type hard-exits the process via exit(2)/exit(3).
                train_x, train_future, weights, train_labels, track_padded = \
                    training_batch_handler.format_minibatch_data(
                        batch_frame['encoder_sample'],
                        batch_frame['dest_1_hot'] if self.parameter_dict['model_type'] == 'classifier' else
                        batch_frame['decoder_sample'] if self.parameter_dict['model_type'] == 'MDN' else exit(2),
                        batch_frame['batchwise_padding'],
                        batch_frame['trackwise_padding'] if self.parameter_dict['track_padding'] else None)
                train_y = train_labels if self.parameter_dict['model_type'] == 'classifier' else \
                    train_future if self.parameter_dict['model_type'] == 'MDN' else exit(3)
                accuracy, step_loss, _, _, _ = netManager.run_training_step(train_x, train_y, weights,
                                                                            True, track_padded)
                # print "Time to step: " + str(time.time() - step_start_time)
                # Periodically, run without training for the summary logs
                # This will always run in the same loop as the checkpoint fn below.
                # Explicit check in case of rounding errors
                step_time += (time.time() - step_start_time) / steps_per_checkpoint
                loss += step_loss / steps_per_checkpoint
                current_step += 1
                netManager.decay_learning_rate() # decay every step by 0.9999 as per sketchrnn
            #### TENSORBOARD LOGGING
            # At every half-checkpoint (and on the final run): re-run the last
            # minibatch with training disabled for summary logging, then run a
            # validation pass (subsampled unless this is the final run).
            if current_step % (steps_per_checkpoint/2) == 0 or \
                    current_step % steps_per_checkpoint == 0 or \
                    final_run:
                train_acc, train_step_loss, _, _, _ = netManager.run_training_step(train_x, train_y, weights, False,
                                                                                   track_padded,
                                                                                   summary_writer=netManager.train_writer)
                #val_time = time.time()
                val_accuracy, val_step_loss, _, _ = netManager.run_validation(validation_batch_handler,
                                                                              summary_writer=netManager.val_writer,
                                                                              quick=(not final_run))
                loss_a.append(train_step_loss)
                val_step_loss_a.append(val_step_loss)
                accuracy_a.append(accuracy)
                val_accuracy_a.append(val_accuracy)
                sys.stdout.write("\rg_step %06d lr %.1e step %.4f avTL %.4f VL %.4f "
                                 % (netManager.get_global_step(),
                                    netManager.get_learning_rate(),
                                    step_time, np.mean(loss_a), np.mean(val_step_loss_a)))
                sys.stdout.flush()
                #print "valbatch Time: " + str(time.time()-val_time)
            #### EVALUATION / CHECKPOINTING
            sys.stdout.write("\rg_step %06d " % (current_step))
            sys.stdout.flush()
            if (current_step % steps_per_checkpoint == 0) or final_run:
                sys.stdout.write("\rg_step %06d lr %.1e step %.4f avTL %.4f VL %.4f "
                                 % (netManager.get_global_step(),
                                    netManager.get_learning_rate(),
                                    step_time, np.mean(loss_a), np.mean(val_step_loss_a)))
                sys.stdout.flush()
                # TODO make this run every n minutes, not a multiple of steps. Also add duration reporting to console
                # Every 10th checkpoint (or on the final run) compute the
                # expensive model-type-specific evaluation metric.
                if (((not self.parameter_dict['debug']) and current_step % (steps_per_checkpoint*10) == 0) or final_run)\
                        and self.parameter_dict['model_type'] == 'classifier':
                    # Compute Distance Metric
                    dist_results = netManager.compute_result_per_dis(validation_batch_handler, plot=False)
                    # f1_scores = netManager.compute_distance_f1_report(dist_results)
                    metric_results, metric_labels = netManager.evaluate_pdis_metric(dist_results)
                    metric_string = " "
                    for metric_idx in range(len(metric_results)):
                        metric_string += metric_labels[metric_idx][0]
                        metric_string += "%0.1f " % metric_results[metric_idx]
                    # DOn't log hyper search graphs, it explodes the log directory.
                    if not hyper_search:
                        graphs = netManager.draw_categorical_png_graphs_perf_dist(dist_results)
                        # netManager.log_graphs_to_tensorboard(graphs)
                        netManager.log_metric_to_tensorboard(metric_results)
                    sys.stdout.write("p_dis" + metric_string)
                elif (((not self.parameter_dict['debug']) and current_step % (steps_per_checkpoint*10) == 0) or final_run)\
                        and self.parameter_dict['model_type'] == 'MDN':
                    # print "Write PNG graphing functions here."
                    netManager.draw_generative_png_graphs(validation_batch_handler,multi_sample=1, final_run=final_run)
                    # netManager.draw_generative_png_graphs(validation_batch_handler, multi_sample=20,
                    #                                       draw_prediction_track=False, final_run=final_run)
                    # I rarely use this, and now the multithreader cannot return a value if it is backgrounded.
                    # netManager.log_graphs_to_tensorboard(graphs)
                    # Sentinel: no per-distance metric exists for MDN models;
                    # the 'perfect_distance' branch at the end skips it too.
                    metric_results = -999
                sys.stdout.write("\r\n")
                sys.stdout.flush()
                netManager.checkpoint_model()
                # Log all things
                results_dict = {'g_step': netManager.get_global_step(),
                                'training_loss': np.mean(loss_a),
                                'training_acc': np.mean(accuracy_a),
                                'validation_loss': np.mean(val_step_loss_a),
                                'validation_acc': np.mean(val_accuracy_a)}
                training_log_df = training_log_df.append(results_dict, ignore_index=True)
                ### Decay learning rate checks
                # if (len(previous_losses) > self.parameter_dict['decrement_steps']-1
                #     and
                #     loss > 0.99*(max(previous_losses))): #0.95 is float fudge factor
                #     netManager.decay_learning_rate()
                #     previous_losses = []
                # previous_losses.append(loss)
                # previous_losses = previous_losses[-self.parameter_dict['decrement_steps']:]
                previous_val_losses.append(val_step_loss)
                previous_val_losses = previous_val_losses[-self.parameter_dict['decrement_steps']:]
                ##### Training stop conditions:
                if final_run:
                    break
                # Check for significant divergence of val_loss and train_loss
                model_is_overfit = False
                if (loss < (val_step_loss)*0.9 and # train / val have diverged
                        val_step_loss > 0.95*max(previous_val_losses) and
                        not self.parameter_dict['first_loss_only']): # val is ~increasing
                    overfitting_steps += 1
                    print "Warning, overfitting detected. Will stop training if it continues"
                    if overfitting_steps > 20:
                        model_is_overfit = True
                else:
                    overfitting_steps = 0
                learning_rate_too_low = (netManager.get_learning_rate() <
                                         self.parameter_dict['loss_decay_cutoff'] *
                                         self.parameter_dict['learning_rate'])
                if learning_rate_too_low:
                    print "Stopping due to low learning rate"
                out_of_time = time.time() - fold_time > 60 * self.parameter_dict['training_early_stop']
                if out_of_time:
                    print "Stopping due to time cutoff"
                out_of_steps = (self.parameter_dict['long_training_steps'] is not None and
                                current_step > self.parameter_dict['long_training_steps'])
                if out_of_steps:
                    print "Stopping due to step cutoff"
                if learning_rate_too_low or out_of_time or model_is_overfit or out_of_steps or self.sigint_caught:
                    # Lookup best model based on val_step_loss
                    # Load best model.
                    # Run one more loop for final network scores
                    best_g_step = training_log_df.sort_values('validation_loss',ascending=True).iloc[0].g_step
                    print "FINAL RUN, Best model was at step: " + str(best_g_step)
                    netManager.load_from_checkpoint(best_g_step)
                    netManager.clean_checkpoint_dir(best_g_step)
                    final_run = True
                    step_time, loss = 0.0, 0.0
                    # NOTE(review): loss_a is NOT cleared here (only the other
                    # three accumulators are), so the final averaged training
                    # loss still includes pre-restore values -- confirm intended.
                    val_step_loss_a = []
                    accuracy_a = []
                    val_accuracy_a = []
        # Now restore old signal handler so that the sig capture function doesn't fall out of scope.
        signal.signal(signal.SIGINT, original_sigint_handler)
        fold_results = copy.copy(self.parameter_dict)
        # Flatten the list so the dict can later be placed into a DataFrame row.
        fold_results['input_columns'] = ",".join(fold_results['input_columns'])
        fold_results['eval_accuracy'] = train_acc
        fold_results['final_learning_rate'] = netManager.get_learning_rate()
        fold_results['training_accuracy'] = accuracy
        fold_results['training_loss'] = train_step_loss
        fold_results['network_chkpt_dir'] = netManager.log_file_name
        fold_results['validation_accuracy'] = val_accuracy
        fold_results['validation_loss'] = val_step_loss
        if self.parameter_dict['model_type'] == 'classifier':
            for class_idx in range(len(metric_results)):
                key_str = 'perfect_distance_' + str(class_idx)
                fold_results[key_str] = metric_results[class_idx]
            fold_results['perfect_distance'] = np.max(metric_results) # worst distance
        else:
            fold_results['perfect_distance'] = 0
        return fold_results
def test_network(self, netManager, test_batch_handler, distance=0):
# Function that takes the currently built network and runs the test data through it (each data point is run once
# and only once). Graphs are generated. Make it easy to generate many graphs as this will be helpful for the
# sequence generation model
# This section and its affiliate long_train_network is going to change a lot. There are several things I want to
# do here.
# 1. Add the d=0 constraint for the test data.
# 2. Run graphs in their own test_results folder
# 3. Redefine the metric used on the network here for scoring. Maybe even have multiple scoring types reported
# I think I want to spin out all the graph drawers from NetworkManager, and for the final report here, I want
# the data being fed to the graphs saved, such that it is easy to cross-compile graphs between methods
test_accuracy, test_loss, report_df, _ = netManager.run_validation(test_batch_handler,
summary_writer=netManager.test_writer,
quick=False, report_writing=True,
distance_threshold=distance)
return test_accuracy, test_loss, report_df
    def run_hyperparameter_search(self):
        """Search hyperparameters with dlib's global optimizer.

        Wraps training in a closure (hyper_training_helper) that mutates
        self.parameter_dict with each candidate, trains one crossfold, and
        returns the summed euclidean error as the objective for
        dlib.find_min_global. All per-trial results are accumulated into
        hyper.csv and the best row (by the configured evaluation metric)
        is returned as a parameter dict.

        Returns:
            dict: parameter settings of the best trial per
            self.parameter_dict['evaluation_metric_type'].
        """
        hyperparam_results_list = []
        # NOTE(review): hyper_time is never read afterwards.
        hyper_time = time.time()
        self.parameter_dict['training_early_stop'] = self.parameter_dict['early_stop_cf']
        # Caches keyed on the hash of the sorted unique track ids of a pool, so
        # repeated trials on the same fold reuse the (expensive) BatchHandlers.
        training_batch_handler_cache = {}
        validation_batch_handler_cache = {}
        def hyper_training_helper(hyper_learning_rate,
                                  hyper_rnn_size,
                                  hyper_reg_embedding_beta,
                                  hyper_reg_l2_beta,
                                  hyper_learning_rate_decay,
                                  hyper_learning_rate_min,
                                  padding_loss_logit_weight,
                                  padding_loss_mixture_weight):
            """
            Function used to wrap the hyperparameters and settings such that it fits the format used by dlib.
            Some variables need to be side-loaded, mostly reporting values.

            Several arguments are searched in log10 space and exponentiated
            here before being written into self.parameter_dict. Returns the
            scalar objective value (euclidean error sum; lower is better).
            """
            ############# SELECT NEW PARAMS
            self.parameter_dict['learning_rate'] = 10 ** hyper_learning_rate
            self.parameter_dict['rnn_size'] = int(hyper_rnn_size)
            self.parameter_dict['reg_embedding_beta'] = 10 ** hyper_reg_embedding_beta
            self.parameter_dict['l2_reg_beta'] = 10 ** hyper_reg_l2_beta
            self.parameter_dict['learning_rate_decay_factor'] = hyper_learning_rate_decay
            # learning_rate_min is searched as a log-ratio relative to the
            # starting learning rate.
            self.parameter_dict['learning_rate_min'] = \
                (10 ** hyper_learning_rate_min) * self.parameter_dict['learning_rate']
            self.parameter_dict['embedding_size'] = self.parameter_dict['rnn_size']
            self.parameter_dict['padding_loss_logit_weight'] = padding_loss_logit_weight
            self.parameter_dict['padding_loss_mixture_weight'] = padding_loss_mixture_weight
            # Update Cutoffs
            self.parameter_dict['long_training_time'] = self.parameter_dict['early_stop_cf']
            self.parameter_dict['long_training_steps'] = self.parameter_dict['hyper_search_step_cutoff']
            ######### / PARAMS
            print 'learning_rate ' + str(10 ** hyper_learning_rate)
            print 'rnn_size ' + str(hyper_rnn_size)
            print 'reg_embedding_beta ' + str(10 ** hyper_reg_embedding_beta)
            print 'l2_reg_beta ' + str(10 ** hyper_reg_l2_beta)
            print 'learning_rate_decay_factor ' + str(hyper_learning_rate_decay)
            print 'padding_loss_logit_weight ' + str(padding_loss_logit_weight)
            print 'padding_loss_mixture_weight ' + str(padding_loss_mixture_weight)
            cf_fold = -1
            # I should call this outside the crossfold, so it occurs once
            # This way all the crossfolds for the same hyperparameters are adjacent in the checkpoint dirs
            log_file_time = str(time.time())
            cf_results_list = []
            for train_pool, val_pool in self.cf_pool:
                cf_fold += 1
                log_file_name = log_file_time + "-cf-" + str(cf_fold)
                print "Starting crossfold"
                # Collect batch_handlers, and check if they've been cached.
                try:
                    training_batch_handler = training_batch_handler_cache[hash(tuple(np.sort(train_pool.uniqueId.unique())))]
                except KeyError:
                    training_batch_handler = BatchHandler.BatchHandler(train_pool, self.parameter_dict, True)
                except AttributeError:
                    print 'This should not be attainable, as crossfold==2 is invalid'
                try:
                    validation_batch_handler = validation_batch_handler_cache[hash(tuple(np.sort(val_pool.uniqueId.unique())))]
                except KeyError:
                    validation_batch_handler = BatchHandler.BatchHandler(val_pool, self.parameter_dict, False)
                except AttributeError:
                    print 'This should not be attainable, as crossfold==2 is invalid'
                # Add input_size, num_classes
                self.parameter_dict['input_size'] = training_batch_handler.get_input_size()
                self.parameter_dict['num_classes'] = training_batch_handler.get_num_classes()
                netManager = NetworkManager.NetworkManager(self.parameter_dict, log_file_name)
                netManager.build_model(self.encoder_means,self.encoder_stddev)
                try:
                    cf_results = self.train_network(netManager,training_batch_handler,validation_batch_handler,hyper_search=True)
                except tf.errors.InvalidArgumentError:
                    print "**********************caught error, probably gradients have exploded"
                    # HUGE LOSS --> this was caused by bad init conditions, so it should be avoided.
                    return 99999999
                # Now assign the handlers to the cache IF AND ONLY IF the training was successful.
                # If it dies before the first pool sort in the training, the whole thing falls over.
                validation_batch_handler_cache[
                    hash(tuple(np.sort(val_pool.uniqueId.unique())))] = validation_batch_handler
                training_batch_handler_cache[
                    hash(tuple(np.sort(train_pool.uniqueId.unique())))] = training_batch_handler
                cf_results['crossfold_number'] = cf_fold
                # As pandas does not like lists when adding a list to a row of a dataframe, set to None (the lists are
                # a large amount of redundant data). This is why I copy out parameters.py
                for key, value in cf_results.iteritems():
                    if (type(value) is list or
                            type(value) is np.ndarray or
                            type(value) is tuple):
                        cf_results[key] = pd.Series([value],dtype=object)
                cf_results_list.append(pd.DataFrame(cf_results, index=[0]))
                # plot
                print "Drawing html graph"
                # NOTE(review): this tests for 'categorical', but train_network
                # branches on 'classifier' -- confirm which label is current.
                if self.parameter_dict['model_type'] == 'categorical':
                    netManager.draw_categorical_html_graphs(validation_batch_handler)
                else:
                    netManager.draw_generative_html_graphs(validation_batch_handler,multi_sample=1)
                    netManager.draw_generative_html_graphs(validation_batch_handler,multi_sample=20)
                # Here we have a fully trained model, but we are still in the cross fold.
                # FIXME Only do 1 fold per hyperparams. Its not neccessary to continue
                break
            # Run reportwriter here and return all_tracks..... euclidean loss?
            val_acc, val_loss, report_df =\
                self.test_network(netManager, validation_batch_handler)
            cf_df = pd.concat(cf_results_list)
            # Condense results from cross fold (Average, best, worst, whatever selection method)
            hyperparam_results = copy.copy(self.parameter_dict)
            #hyperparam_results['input_columns'] = ",".join(hyperparam_results['input_columns'])
            hyperparam_results['eval_accuracy'] = np.min(cf_df['eval_accuracy'])
            hyperparam_results['final_learning_rate'] = np.min(cf_df['final_learning_rate'])
            hyperparam_results['training_accuracy'] = np.min(cf_df['training_accuracy'])
            hyperparam_results['training_loss'] = np.average(cf_df['training_loss'])
            hyperparam_results['validation_accuracy'] = np.average(cf_df['validation_accuracy'])
            hyperparam_results['validation_loss'] =np.average(cf_df['validation_loss'])
            track_scores = ReportWriter.ReportWriter.score_model_on_metric(self.parameter_dict, report_df)
            hyperparam_results['euclidean_err_sum'] = sum(track_scores['euclidean'])
            hyperparam_results['crossfold_number'] = -1
            #FIXME What is this line doing?
            # Picks the checkpoint dir of the fold with the best eval accuracy.
            hyperparam_results['network_chkpt_dir'] = (
                cf_df.sort_values('eval_accuracy',ascending=False).iloc[[0]]['network_chkpt_dir'])
            # Marks this row as a cross-fold summary so it can be filtered below.
            hyperparam_results['cf_summary'] = True
            for key, value in hyperparam_results.iteritems():
                if (type(value) is list or
                        type(value) is np.ndarray or
                        type(value) is tuple):
                    hyperparam_results[key] = pd.Series([value],dtype=object) # str(cf_results[key])
            hyperparam_results_list.append(pd.DataFrame(hyperparam_results, index=[0]))
            hyperparam_results_list.append(cf_df)
            #Write results and hyperparams to hyperparameter_results_dataframe
            return hyperparam_results['euclidean_err_sum']
        ################################
        # dlib is only needed for the search, hence the local import.
        import dlib
        # http://blog.dlib.net/2017/12/a-global-optimization-algorithm-worth.html
        # Search bounds for the eight hyperparameters, in the same order as
        # hyper_training_helper's arguments.
        lowers = [
            min(self.parameter_dict['hyper_learning_rate_args']),
            min(self.parameter_dict['hyper_rnn_size_args']),
            min(self.parameter_dict['hyper_reg_embedding_beta_args']),
            min(self.parameter_dict['hyper_reg_l2_beta_args']),
            min(self.parameter_dict['hyper_learning_rate_decay_args']),
            min(self.parameter_dict['hyper_learning_rate_min_args']),
            min(self.parameter_dict['hyper_padding_loss_logit_weight_args']),
            min(self.parameter_dict['hyper_padding_loss_mixture_weight_args'])
        ]
        uppers = [
            max(self.parameter_dict['hyper_learning_rate_args']),
            max(self.parameter_dict['hyper_rnn_size_args']),
            max(self.parameter_dict['hyper_reg_embedding_beta_args']),
            max(self.parameter_dict['hyper_reg_l2_beta_args']),
            max(self.parameter_dict['hyper_learning_rate_decay_args']),
            max(self.parameter_dict['hyper_learning_rate_min_args']),
            max(self.parameter_dict['hyper_padding_loss_logit_weight_args']),
            max(self.parameter_dict['hyper_padding_loss_mixture_weight_args'])
        ]
        x,y = dlib.find_min_global(hyper_training_helper, lowers, uppers,
                                   [False, True, False, False, False, False, False, False], # Is integer Variable
                                   self.parameter_dict['hyper_search_folds'])
        hyper_df = pd.concat(hyperparam_results_list, ignore_index=True)
        hyper_df.to_csv(os.path.join(self.parameter_dict['master_dir'], self.hyper_results_logfile))
        # Only the per-trial summary rows (cf_summary==True) compete for 'best'.
        summary_df = hyper_df[hyper_df['cf_summary'] == True]
        # Distance at which the classifier can make a sound judgement, lower is better
        if self.parameter_dict['evaluation_metric_type'] == 'perfect_distance':
            best_params = summary_df.sort_values('perfect_distance',ascending=True).iloc[0].to_dict()
        if self.parameter_dict['evaluation_metric_type'] == 'validation_accuracy': # Higher better
            best_params = summary_df.sort_values('validation_accuracy', ascending=False).iloc[0].to_dict()
        if self.parameter_dict['evaluation_metric_type'] == 'validation_loss': # Lower better
            best_params = summary_df.sort_values('validation_loss', ascending=True).iloc[0].to_dict()
        if self.parameter_dict['evaluation_metric_type'] == 'euclidean_err_sum': # Lower better
            best_params = summary_df.sort_values('euclidean_err_sum', ascending=True).iloc[0].to_dict()
        # TODO eval_metric_type_reportwriter?
        return best_params
    def long_train_network(self, params, train_pool, val_pool, test_pool, checkpoint=None, test_network_only=False):
        """Do one long training run with the given params, then test and report.

        Either trains a fresh network (default) or, with test_network_only,
        loads an existing checkpoint and only evaluates it. After testing it
        writes metric CSVs via ReportWriter and (optionally) renders per-track
        prediction plots at several distance horizons using a process pool.

        Args:
            params: parameter dict; replaces self.parameter_dict wholesale.
            train_pool: training data pool.
            val_pool: validation data pool.
            test_pool: test data pool.
            checkpoint: existing log/checkpoint directory name to reuse; a new
                timestamped "best-..." name is generated when None.
            test_network_only: skip training and only evaluate the checkpoint.

        Returns:
            pandas.DataFrame: single-row frame of the final results
            (training metrics plus test_accuracy / test_loss).
        """
        self.parameter_dict = params
        # Run for many minutes, or until loss decays significantly.
        self.parameter_dict['training_early_stop'] = self.parameter_dict['long_training_time']
        if checkpoint is not None:
            log_file_name = checkpoint
        else:
            log_file_name = "best-" + str(time.time())
        training_batch_handler = BatchHandler.BatchHandler(train_pool, self.parameter_dict, True)
        validation_batch_handler = BatchHandler.BatchHandler(val_pool, self.parameter_dict, False)
        test_batch_handler = BatchHandler.BatchHandler(test_pool, self.parameter_dict, False)
        # Add input_size, num_classes
        self.parameter_dict['input_size'] = training_batch_handler.get_input_size()
        self.parameter_dict['num_classes'] = training_batch_handler.get_num_classes()
        netManager = NetworkManager.NetworkManager(self.parameter_dict, log_file_name)
        if not test_network_only:
            netManager.build_model(self.encoder_means, self.encoder_stddev)
            best_results = self.train_network(netManager,training_batch_handler,validation_batch_handler)
        else:
            # We are loading a network from a checkpoint
            # NOTE(review): build_model is called with no encoder stats here,
            # unlike every other call site -- confirm NetworkManager.build_model
            # has defaults for this path.
            netManager.build_model()
            best_results = {}
        best_results['test_accuracy'], best_results['test_loss'], report_df = self.test_network(netManager,
                                                                                                test_batch_handler)
        #print "Drawing html graph" # I don't read these graphs anymore
        #netManager.draw_html_graphs(netManager.compute_result_per_dis(test_batch_handler))
        #if self.parameter_dict['model_type'] == 'categorical':
        #    netManager.draw_categorical_html_graphs(test_batch_handler)
        #else:
        #    netManager.draw_generative_html_graphs(test_batch_handler, multi_sample=1)
        #netManager.draw_generative_html_graphs(test_batch_handler, multi_sample=20)
        # FIXME maybe this needs its own function?
        # Flatten container values so the dict fits in one DataFrame row.
        for key, value in best_results.iteritems():
            if (type(value) is list or
                    type(value) is np.ndarray or
                    type(value) is tuple):
                best_results[key] = pd.Series([value], dtype=object)
        best_results = pd.DataFrame(best_results, index=[0])
        if not test_network_only:
            best_results.to_csv(os.path.join(self.parameter_dict['master_dir'],"best.csv"))
        # Check that every track in the report_df is at distance zero.
        reports = ReportWriter.ReportWriter(training_batch_handler, validation_batch_handler, test_batch_handler,
                                            self.parameter_dict, report_df ) #'results/20180412-120830/plots_img_final'
        #reports.get_results().to_csv(os.path.join(self.parameter_dict['master_dir'],"metrics.csv"))
        # One metrics CSV per result key.
        for key, value in reports.get_results().iteritems():
            pd.DataFrame(value).to_csv(
                os.path.join(self.parameter_dict['master_dir'], key + '-' + "metrics.csv"))
        # SPAGHETTI WARNING
        # This was done in a rush for a journal plot
        # it will plot the output graphs for several distance horizons, to create an effective animation
        # return best_results
        plot_tracks_at_distances = True
        if plot_tracks_at_distances:
            for d in [-5, 0, 5, 10, 20]: #np.arange(-16,22,2): #[-15, -10, -5, 0, 5, 10, 15, 20]:
                print "Now running report for distance: " + str(d) + " meters"
                try:
                    _, _, report_df = self.test_network(netManager, test_batch_handler, distance=d)
                    print "Number of tracks is: " + str(len(report_df.track_idx.unique()))
                except ValueError:
                    # No data for this distance
                    continue
                # Clustering knobs fall back to defaults when not configured.
                try:
                    cluster_mix_weight_threshold = self.parameter_dict['cluster_mix_weight_threshold']
                except KeyError:
                    cluster_mix_weight_threshold = 0.5
                try:
                    cluster_eps = float(self.parameter_dict['cluster_eps'])
                except KeyError:
                    cluster_eps = 1.0
                try:
                    cluster_min_samples = self.parameter_dict['cluster_min_samples']
                except KeyError:
                    cluster_min_samples = 1
                # Plot rendering is farmed out to worker processes; args for
                # every track are collected first, then mapped in one batch.
                pool = mp.Pool(processes=7, maxtasksperchild=1)
                args = []
                plt_size = (6, 6) # (10, 10)
                plot_dir = os.path.join(self.parameter_dict['master_dir'], 'sequential_test_data_plots')
                if not os.path.exists(plot_dir):
                    os.makedirs(plot_dir)
                for track_idx in report_df.track_idx:
                    model_predictions = {}
                    track_df = report_df[report_df.track_idx == track_idx]
                    # Journal-plot filter: only tracks whose relative
                    # destination contains 'right' are rendered.
                    if 'right' not in track_df.relative_destination.iloc[0]:
                        continue
                    #if track_df.track_idx.iloc[0] not in [16780]:
                    #    continue
                    #model_predictions["RNN-FL"] = track_df.outputs.iloc[0]
                    path_MDN_clusters, path_centroids, path_weights = MDN_clustering.cluster_MDN_into_sets(
                        report_df[report_df.track_idx == track_idx].mixtures.iloc[0],
                        mix_weight_threshold=cluster_mix_weight_threshold, eps=cluster_eps, min_samples=cluster_min_samples)
                    for centroid_idx in range(len(path_centroids)):
                        model_predictions['multipath_' + str(centroid_idx)] = np.array(path_centroids[centroid_idx])
                    for padding_mask in ['Network']:
                        # Positional argument list for utils_draw_graphs.multiprocess_helper.
                        args.append([track_df.encoder_sample.iloc[0],
                                     model_predictions,
                                     track_df.decoder_sample.iloc[0], # Ground Truth
                                     track_df.mixtures.iloc[0],
                                     track_df.padding_logits.iloc[0],
                                     track_df.trackwise_padding.iloc[0],
                                     plt_size,
                                     False, # draw_prediction_track,
                                     plot_dir, # self.plot_directory,
                                     "best", # self.log_file_name,
                                     False, # multi_sample,
                                     0, # self.get_global_step(),
                                     track_idx, # graph_number,
                                     plot_dir, # fig_dir,
                                     track_df.csv_name.iloc[0],
                                     track_df.relative_destination.iloc[0],
                                     utils.sanitize_params_dict(self.parameter_dict), padding_mask, d])
                results = pool.map(utils_draw_graphs.multiprocess_helper, args)
        return best_results