diff --git a/data/generate_dataset.py b/data/generate_dataset.py
index 37d82a8..bc70d31 100644
--- a/data/generate_dataset.py
+++ b/data/generate_dataset.py
@@ -2,6 +2,9 @@
 import time
 import numpy as np
 import argparse
+import multiprocessing
+import math
+
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--simulation', type=str, default='springs',
@@ -39,21 +42,29 @@ print(suffix)
 
+def wrapper(args):
+    """Defined at module top level so it can be pickled; the argument
+    tuple is unpacked manually."""
+    t = time.time()
+    res = sim.sample_trajectory(T=args[1], sample_freq=args[2])
+    if args[0] % 100 == 0:
+        print("Iter: {}, Simulation time: {}".format(args[0], time.time() - t))
+    return res
+
 
 def generate_dataset(num_sims, length, sample_freq):
-    loc_all = list()
-    vel_all = list()
-    edges_all = list()
-
-    for i in range(num_sims):
-        t = time.time()
-        loc, vel, edges = sim.sample_trajectory(T=length,
-                                                sample_freq=sample_freq)
-        if i % 100 == 0:
-            print("Iter: {}, Simulation time: {}".format(i, time.time() - t))
-        loc_all.append(loc)
-        vel_all.append(vel)
-        edges_all.append(edges)
+
+    # Use about half the logical CPUs to sidestep hyperthreading (a bit dirty).
+    pool = multiprocessing.Pool(math.ceil(multiprocessing.cpu_count() / 2))
+
+    def arguments(num_sims):
+        """Yield for every simulation the argument tuple
+        (step, length, sample_freq) to be fed to the wrapper.
+        """
+        for i in range(num_sims):
+            yield i, length, sample_freq
+
+    loc_all, vel_all, edges_all = zip(*list(pool.imap(wrapper, arguments(num_sims))))
+    pool.close()
+    pool.join()
 
     loc_all = np.stack(loc_all)
     vel_all = np.stack(vel_all)
@@ -61,7 +72,6 @@ def generate_dataset(num_sims, length, sample_freq):
     return loc_all, vel_all, edges_all
 
-
 print("Generating {} training simulations".format(args.num_train))
 loc_train, vel_train, edges_train = generate_dataset(args.num_train,
                                                      args.length,
diff --git a/data/synthetic_sim.py b/data/synthetic_sim.py
index 76ef2ab..a8dfc4d 100644
--- a/data/synthetic_sim.py
+++ b/data/synthetic_sim.py
@@ -165,6 +165,7 @@ def _l2(self, A, B):
         A_norm = (A ** 2).sum(axis=1).reshape(A.shape[0], 1)
         B_norm = (B ** 2).sum(axis=1).reshape(1, B.shape[0])
         dist = A_norm + B_norm - 2 * A.dot(B.transpose())
+        dist[dist < 0] = 0  # clamp tiny negatives caused by floating-point error
         return dist
 
     def _energy(self, loc, vel, edges):
diff --git a/lstm_baseline.py b/lstm_baseline.py
index 1655394..0198252 100644
--- a/lstm_baseline.py
+++ b/lstm_baseline.py
@@ -237,9 +237,9 @@ def train(epoch, best_val_loss):
         loss.backward()
         optimizer.step()
 
-        loss_train.append(loss.data[0])
-        mse_train.append(mse.data[0])
-        mse_baseline_train.append(mse_baseline.data[0])
+        loss_train.append(loss.item())
+        mse_train.append(mse.item())
+        mse_baseline_train.append(mse_baseline.item())
 
     model.eval()
     for batch_idx, (data, relations) in enumerate(valid_loader):
@@ -257,9 +257,9 @@ def train(epoch, best_val_loss):
         mse = F.mse_loss(output, target)
         mse_baseline = F.mse_loss(data[:, :, :-1, :], data[:, :, 1:, :])
 
-        loss_val.append(loss.data[0])
-        mse_val.append(mse.data[0])
-        mse_baseline_val.append(mse_baseline.data[0])
+        loss_val.append(loss.item())
+        mse_val.append(mse.item())
+        mse_baseline_val.append(mse_baseline.item())
 
     print('Epoch: {:04d}'.format(epoch),
           'nll_train: {:.10f}'.format(np.mean(loss_train)),
@@ -315,9 +315,9 @@ def test():
         mse = F.mse_loss(output, target)
         mse_baseline = F.mse_loss(ins_cut[:, :, :-1, :], ins_cut[:, :, 1:, :])
 
-        loss_test.append(loss.data[0])
-        mse_test.append(mse.data[0])
-        mse_baseline_test.append(mse_baseline.data[0])
+        loss_test.append(loss.item())
+        mse_test.append(mse.item())
+        mse_baseline_test.append(mse_baseline.item())
 
     if args.motion or args.non_markov:
         # RNN decoder evaluation setting
diff --git a/train.py b/train.py
index ba44aab..a56e842 100644
--- a/train.py
+++ b/train.py
@@ -229,9 +229,9 @@ def train(epoch, best_val_loss):
         loss.backward()
         optimizer.step()
 
-        mse_train.append(F.mse_loss(output, target).data[0])
-        nll_train.append(loss_nll.data[0])
-        kl_train.append(loss_kl.data[0])
+        mse_train.append(F.mse_loss(output, target).item())
+        nll_train.append(loss_nll.item())
+        kl_train.append(loss_kl.item())
 
     nll_val = []
     acc_val = []
@@ -260,9 +260,9 @@ def train(epoch, best_val_loss):
         acc = edge_accuracy(logits, relations)
         acc_val.append(acc)
 
-        mse_val.append(F.mse_loss(output, target).data[0])
-        nll_val.append(loss_nll.data[0])
-        kl_val.append(loss_kl.data[0])
+        mse_val.append(F.mse_loss(output, target).item())
+        nll_val.append(loss_nll.item())
+        kl_val.append(loss_kl.item())
 
     print('Epoch: {:04d}'.format(epoch),
           'nll_train: {:.10f}'.format(np.mean(nll_train)),
@@ -329,9 +329,9 @@ def test():
         acc = edge_accuracy(logits, relations)
         acc_test.append(acc)
 
-        mse_test.append(F.mse_loss(output, target).data[0])
-        nll_test.append(loss_nll.data[0])
-        kl_test.append(loss_kl.data[0])
+        mse_test.append(F.mse_loss(output, target).item())
+        nll_test.append(loss_nll.item())
+        kl_test.append(loss_kl.item())
 
     # For plotting purposes
     if args.decoder == 'rnn':
diff --git a/train_dec.py b/train_dec.py
index 16d23bb..58b3136 100644
--- a/train_dec.py
+++ b/train_dec.py
@@ -192,9 +192,9 @@ def train(epoch, best_val_loss):
 
         optimizer.step()
 
-        loss_train.append(loss.data[0])
-        mse_train.append(mse.data[0])
-        mse_baseline_train.append(mse_baseline.data[0])
+        loss_train.append(loss.item())
+        mse_train.append(mse.item())
+        mse_baseline_train.append(mse_baseline.item())
 
     model.eval()
     for batch_idx, (inputs, relations) in enumerate(valid_loader):
@@ -227,9 +227,9 @@ def train(epoch, best_val_loss):
         mse = F.mse_loss(output, target)
         mse_baseline = F.mse_loss(inputs[:, :, :-1, :], inputs[:, :, 1:, :])
 
-        loss_val.append(loss.data[0])
-        mse_val.append(mse.data[0])
-        mse_baseline_val.append(mse_baseline.data[0])
+        loss_val.append(loss.item())
+        mse_val.append(mse.item())
+        mse_baseline_val.append(mse_baseline.item())
 
     print('Epoch: {:04d}'.format(epoch),
           'nll_train: {:.10f}'.format(np.mean(loss_train)),
@@ -298,9 +298,9 @@ def test():
         mse = F.mse_loss(output, target)
         mse_baseline = F.mse_loss(ins_cut[:, :, :-1, :], ins_cut[:, :, 1:, :])
 
-        loss_test.append(loss.data[0])
-        mse_test.append(mse.data[0])
-        mse_baseline_test.append(mse_baseline.data[0])
+        loss_test.append(loss.item())
+        mse_test.append(mse.item())
+        mse_baseline_test.append(mse_baseline.item())
 
     # For plotting purposes
     if args.decoder == 'rnn':
diff --git a/train_enc.py b/train_enc.py
index 1469c75..dd6295c 100644
--- a/train_enc.py
+++ b/train_enc.py
@@ -148,7 +148,7 @@ def train(epoch, best_val_accuracy):
         correct = pred.eq(target.data.view_as(pred)).cpu().sum()
         acc = correct / pred.size(0)
 
-        loss_train.append(loss.data[0])
+        loss_train.append(loss.item())
         acc_train.append(acc)
 
     model.eval()
@@ -169,7 +169,7 @@ def train(epoch, best_val_accuracy):
         correct = pred.eq(target.data.view_as(pred)).cpu().sum()
         acc = correct / pred.size(0)
 
-        loss_val.append(loss.data[0])
+        loss_val.append(loss.item())
         acc_val.append(acc)
 
     print('Epoch: {:04d}'.format(epoch),
@@ -217,7 +217,7 @@ def test():
         correct = pred.eq(target.data.view_as(pred)).cpu().sum()
         acc = correct / pred.size(0)
 
-        loss_test.append(loss.data[0])
+        loss_test.append(loss.item())
         acc_test.append(acc)
 
     print('--------------------------------')
     print('--------Testing-----------------')
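
Note on the generate_dataset.py hunks above: the sequential loop is replaced by a
multiprocessing.Pool that maps a picklable, top-level wrapper over an iterator of
per-simulation argument tuples. Below is a minimal standalone sketch of that
pattern; the toy simulate() stands in for sim.sample_trajectory, and all names
and sizes in it are illustrative, not taken from the repo.

import math
import multiprocessing

import numpy as np


def simulate(length, sample_freq):
    # Toy stand-in for sim.sample_trajectory: one (length // sample_freq, 2) trajectory.
    return np.random.randn(length // sample_freq, 2)


def wrapper(args):
    # Must live at module top level so it can be pickled and sent to workers.
    i, length, sample_freq = args
    return simulate(length, sample_freq)


if __name__ == '__main__':
    num_sims, length, sample_freq = 10, 1000, 100
    # Roughly half the logical CPUs, mirroring the patch's heuristic.
    pool = multiprocessing.Pool(math.ceil(multiprocessing.cpu_count() / 2))
    results = list(pool.imap(wrapper, ((i, length, sample_freq)
                                       for i in range(num_sims))))
    pool.close()
    pool.join()
    print(np.stack(results).shape)  # (10, 10, 2)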
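
The repeated .data[0] -> .item() edits in lstm_baseline.py, train.py, train_dec.py
and train_enc.py follow the PyTorch 0.4 migration: scalar losses are now 0-dim
tensors, so indexing them raises an IndexError, and .item() is the supported way
to extract the Python number. A short illustration (the value is made up):

import torch

loss = torch.tensor(0.25)  # stand-in for a scalar training loss
print(loss.item())         # 0.25, a plain Python float
# loss.data[0] raises IndexError: invalid index of a 0-dim tensor; use tensor.item()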