import torch
from sklearn.model_selection import KFold
from base import BaseGP, CosineKernel, enforce_type
from pp import PointProcessGPFA
from iomm import OrthogonalMixingModel
from torch import optim


# Example structure of a training loop in PyTorch: maximize the model's
# log-likelihood of the data by gradient descent on the negated loss.
def train_loop(y, model, loss):
    optimizer = optim.SGD(model.parameters(), lr=1e-3)
    for i in range(10):
        optimizer.zero_grad()
        # Negate the log-likelihood so that minimizing l maximizes the likelihood.
        l = -1 * loss(y)
        # backward() takes the calculated value and automatically computes
        # gradients with respect to the model parameters.
        l.backward(retain_graph=True)
        # The optimizer takes those gradients and updates the parameters accordingly.
        optimizer.step()
        # Recompute the loss after the parameter update to monitor convergence.
        l1 = -1 * loss(y).detach()
        delta_loss = torch.abs(l1 - l)
        print('delta_loss: %f' % delta_loss.item())
    return list(model.parameters())
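
# Usage sketch for train_loop (the shapes and constructor arguments below are
# assumptions for illustration, not a documented API): given observed data `y`
# and a model exposing a log-likelihood method, the loop is called as
#
#   base_models = [BaseGP(CosineKernel) for _ in range(2)]
#   model = PointProcessGPFA(4, 2, base_models)
#   fitted = train_loop(y, model, model.approximate_marginal_likelihood)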


# Cross-validation of the point-process model: score each candidate latent
# dimension by its mean 10-fold held-out log-likelihood and report the best.
def Cross_Validation_pp(y):
    marginal_log_likelihood = torch.tensor(float('-inf'))
    dimension = 0
    # Check each candidate latent dimension.
    for i in range(1, 5):
        # Divide the data into 10 folds.
        folds = KFold(10).split(y)
        log_likelihood = torch.tensor(0.0)
        for train, test in folds:
            traindata = y[train]
            testdata = y[test]
            # Fit a fresh model with i latent processes on the training fold.
            base_models = [BaseGP(CosineKernel) for j in range(i)]
            PG0 = PointProcessGPFA(4, i, base_models)
            parameters = train_loop(traindata, PG0, PG0.approximate_marginal_likelihood)
            # parameters[0] is the mixing matrix W; the remaining entries
            # alternate between each base GP's sigma and mu.
            W = parameters[0]
            sigma = []
            mu = []
            for t, param in enumerate(parameters):
                if t == 0:
                    continue
                elif t % 2 == 1:
                    sigma.append(param)
                else:
                    mu.append(param)
            # Rebuild the model from the fitted parameters and score the held-out fold.
            base_models = [BaseGP(CosineKernel, sigma=sigma[j], mu=mu[j]) for j in range(i)]
            PG0 = PointProcessGPFA(4, i, base_models, W)
            log_likelihood += PG0.approximate_marginal_likelihood(testdata)
        # Average the held-out log-likelihood over the 10 folds.
        log_likelihood = log_likelihood / 10
        if log_likelihood >= marginal_log_likelihood:
            marginal_log_likelihood = log_likelihood
            dimension = i
        print('Mean held-out log-likelihood: %f' % log_likelihood)
        print('Current test dimension: %d' % i)
    print('The most likely latent dimension is %d' % dimension)


# Cross-validation of the orthogonal mixing model, following the same
# 10-fold procedure as Cross_Validation_pp.
def Cross_Validation_iomm(y):
    marginal_log_likelihood = torch.tensor(float('-inf'))
    dimension = 0
    # Check each candidate latent dimension.
    for i in range(1, 5):
        # Divide the data into 10 folds.
        folds = KFold(10).split(y)
        log_likelihood = torch.tensor(0.0)
        for train, test in folds:
            traindata = y[train]
            testdata = y[test]
            # Fit a fresh model with i latent processes on the training fold.
            base_models = [BaseGP(CosineKernel) for j in range(i)]
            PG0 = OrthogonalMixingModel(4, i, base_models)
            parameters = train_loop(traindata, PG0, PG0.OrthogonalMixingLikelihood)
            # The first four parameters belong to the mixing model; the rest
            # alternate between each base GP's sigma and mu.
            U = parameters[0]
            logS = parameters[1]
            sigma_sq = parameters[2]
            logD = parameters[3]
            sigma = []
            mu = []
            for t in range(4, len(parameters)):
                if t % 2 == 0:
                    sigma.append(parameters[t])
                else:
                    mu.append(parameters[t])
            # Rebuild the model from the fitted parameters and score the held-out fold.
            base_models = [BaseGP(CosineKernel, sigma=sigma[j], mu=mu[j]) for j in range(i)]
            PG0 = OrthogonalMixingModel(4, i, base_models, U, logS, sigma_sq, logD)
            log_likelihood += PG0.OrthogonalMixingLikelihood(testdata)
        # Average the held-out log-likelihood over the 10 folds.
        log_likelihood = log_likelihood / 10
        if log_likelihood >= marginal_log_likelihood:
            marginal_log_likelihood = log_likelihood
            dimension = i
        print('Mean held-out log-likelihood: %f' % log_likelihood)
        print('Current test dimension: %d' % i)
    print('The most likely latent dimension is %d' % dimension)
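

# Minimal smoke-test sketch on synthetic data. The data shape is an
# assumption: 20 samples of 4-dimensional counts, matching the output
# dimension of 4 hard-coded above; KFold(10) needs at least 10 samples.
if __name__ == '__main__':
    torch.manual_seed(0)
    # Synthetic non-negative count data standing in for real observations.
    y = torch.poisson(torch.ones(20, 4))
    Cross_Validation_pp(y)
    Cross_Validation_iomm(y)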