monitoring.py
import os
import numpy as np
import random
import torch
import shutil
import models
import datetime
import pandas as pd
import matplotlib.pyplot as plt
class TranscriptomeGenerator(object):
    """Creates a genome with genes, reads, and k-mers."""

    def __init__(self, arg):
        super(TranscriptomeGenerator, self).__init__()
        self.arg = arg
def create_experiment_folder(opt):
    params = vars(opt).copy()
    params = str(params)

    # Create an experiment folder named after a random 128-bit hash.
    this_hash = random.getrandbits(128)
    this_hash = "%032x" % this_hash  # in hex
    exp_dir = os.path.join(opt.save_dir, this_hash)

    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)

    # Record the run parameters inside the experiment folder.
    with open(os.path.join(exp_dir, 'run_parameters'), 'w') as f:
        f.write(params + '\n')

    print(vars(opt))
    print(f"Saving everything in {exp_dir}")

    # Keep a global log of all experiments.
    with open(os.path.join(opt.save_dir, 'experiment_table.txt'), 'a') as f:
        f.write('time: {} folder: {} experiment: {}\n'.format(datetime.datetime.now(), this_hash, params))

    return exp_dir
def plot_sample_embs(embs, title, output, fname):
    """Scatter-plot the first two embedding dimensions and save the figure.

    Note: the signature and the scatter call are a best-guess reconstruction;
    the original function was an empty stub followed by unreachable code.
    """
    plt.figure()
    plt.scatter(embs[:, 0], embs[:, 1], label='samples')
    plt.title(title)
    plt.xlabel('emb1')
    plt.ylabel('emb2')
    plt.legend()
    img_path = os.path.join(output, f'{fname}.png')
    plt.savefig(img_path)
    plt.close()
def save_checkpoint(model, optimizer, epoch, opt, exp_dir, filename='checkpoint.pth.tar'):
    state = {
        'epoch': epoch + 1,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'opt': opt
    }
    filename = os.path.join(exp_dir, filename)
    torch.save(state, filename)
def load_checkpoint(load_folder, opt, input_size, filename='checkpoint.pth.tar'):
    # Model
    model_state = None
    # Epoch
    epoch = 0
    # Optimizer
    optimizer_state = None
    # Options
    new_opt = opt

    # Load the states if we saved them.
    if opt.load_folder:
        # Loading all the states
        filename = os.path.join(load_folder, filename)
        if os.path.isfile(filename):
            print(f"=> loading checkpoint '{filename}'")
            checkpoint = torch.load(filename)

            # Loading the options
            new_opt = checkpoint['opt']
            print(f"Loading the model with these parameters: {new_opt}")

            # Loading the state
            model_state = checkpoint['state_dict']
            optimizer_state = checkpoint['optimizer']
            epoch = checkpoint['epoch']

            # Override some of the options between runs, otherwise it might be a pain.
            new_opt.epoch = opt.epoch
            print(f"=> loaded checkpoint '{filename}' (epoch {epoch})")
        else:
            print(f"=> no checkpoint found at '{filename}'")

    # Get the network
    my_model = models.get_model(new_opt, input_size, model_state)

    # Move the model to the GPU if the options ask for it.
    if not opt.cpu:
        print("Putting the model on gpu...")
        my_model.cuda(opt.gpu_selection)

    # Get the optimizer
    optimizer = torch.optim.RMSprop(my_model.parameters(), lr=new_opt.lr, weight_decay=new_opt.weight_decay)
    if optimizer_state is not None:
        optimizer.load_state_dict(optimizer_state)

    print("Our model:")
    print(my_model)

    return my_model, optimizer, epoch, new_opt
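
# Usage sketch (illustrative): how these helpers are typically wired together
# in a training loop. `opt` is assumed to be an argparse Namespace with the
# fields referenced above (save_dir, load_folder, cpu, gpu_selection, lr,
# weight_decay, epoch); `train_one_epoch` and `input_size` are hypothetical
# placeholders that would come from the caller's training script.
#
#   exp_dir = create_experiment_folder(opt)
#   model, optimizer, start_epoch, opt = load_checkpoint(opt.load_folder, opt, input_size)
#   for epoch in range(start_epoch, opt.epoch):
#       train_one_epoch(model, optimizer)
#       save_checkpoint(model, optimizer, epoch, opt, exp_dir)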