attractor_analysis.py
"""
For figuring out whether or not neural networks possess attractors, and where they are.
"""
import math
import torch
import numpy as np
from src.neural_nets.models import get_model
# Model configuration matching the model_dict argument of generate_trajectories().
my_dict = {'architecture': "TANH",
           'readout': "linear",
           'input_size': 88,
           'hidden_size': 120,
           'num_layers': 1,
           'output_size': 88,
           'gradient_clipping': 1
           }
def generate_trajectories(model_dict: dict, sd_path: str, filename: str):
    """Roll the trained model out from random initial conditions and save the trajectories."""
    model = get_model(model_dict, {'init': "default"}, False)
    sd = torch.load(sd_path)
    model.load_state_dict(sd)

    # 8192 random initial conditions, each a single 88-dimensional time step.
    initial_conditions = torch.randn((8192, 1, 88))
    last_tensor = initial_conditions

    # Store the initial condition plus 256 iterates for every trajectory.
    result_tensor = torch.zeros((8192, 257, 88))
    result_tensor[:, 0, :] = initial_conditions[:, 0, :]

    # Iterate the map: feed the scaled output back in as the next input.
    for t in range(256):
        output, hiddens = model(last_tensor)
        last_tensor = 2 * output
        result_tensor[:, t + 1, :] = output[:, 0, :]

    result = result_tensor.detach().numpy()
    np.save(filename, result)
def get_variances(array):
    """Return one spread value per trajectory: the square root of the summed
    squared deviations from the trajectory's mean, scaled by (length - 1)."""
    result = []
    for current in array:
        mu = np.mean(current)
        shifted = current - mu
        total = 0
        for v in shifted:
            total += np.linalg.norm(v) ** 2
        variance = math.sqrt(total) / (len(current) - 1)
        result.append(variance)
    return result
def get_variance_over_window(filename: str, window: int, mindex: int, maxdex: int):
    """Compute the per-trajectory spread within successive time windows for
    trajectories mindex through maxdex - 1 of the saved array."""
    array = np.load(filename)
    variances = []
    for i in range(0, 257, window):
        current = array[mindex:maxdex, i:i + window]
        # Skip a trailing window with fewer than two timesteps, since
        # get_variances divides by (window length - 1).
        if current.shape[1] < 2:
            break
        variances.append(get_variances(current))
    return np.array(variances)
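
# Usage sketch (not part of the original file): the checkpoint and output paths
# below are hypothetical placeholders. It shows how the functions above chain
# together: roll trajectories out from a saved state dict, then measure how much
# each trajectory spreads within successive time windows. Spreads that shrink
# toward zero in late windows are consistent with the dynamics settling onto an
# attractor.
if __name__ == "__main__":
    generate_trajectories(my_dict, "tanh_state_dict.pt", "trajectories.npy")  # hypothetical paths
    window_variances = get_variance_over_window("trajectories.npy", window=32,
                                                mindex=0, maxdex=8192)
    # Shape: (number of windows, number of trajectories).
    print(window_variances.shape)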