import numpy as np
# from tqdm._tqdm_notebook import tqdm_notebook as tqdm
# tqdm().pandas()

class Layer():

    def __init__(self, model, f, d_f, input_dims=None, output_dims=None,
                 input_layer=False, output_layer=False, learning_rate=0.001):
        self.model = model
        self.input_dims = input_dims
        self.output_dims = output_dims
        self.learning_rate = learning_rate

        # Parameters: activations and pre-activations
        self.a = None
        self.z = None

        # Weights and biases
        self.W = None
        self.b = None

        # Gradients computed during backpropagation
        self.dW = None
        self.db = None
        self.da = None
        self.dz = None

        self.input_layer = input_layer
        self.output_layer = output_layer

        # Activation function and its derivative
        self.f = f
        self.d_f = d_f

        # Adjacent layers, wired up when the network builds its layers
        self.next_layer = None
        self.prev_layer = None

    def random_init(self):
        # Kaiming (He) weight initialization
        self.W = np.random.randn(self.output_dims, self.input_dims) * np.sqrt(2) / np.sqrt(self.input_dims)

        # Xavier weight initialization
        # B = np.sqrt(6) / np.sqrt(self.input_dims + self.output_dims)
        # self.W = np.random.uniform(low=-B, high=B, size=(self.output_dims, self.input_dims))

        self.b = np.zeros(shape=(self.output_dims, 1))

    def get_prev_a(self):
        # The input layer reads the training data directly; every other layer reads
        # the previous layer's activations.
        if self.input_layer:
            return self.model.data
        return self.prev_layer.a

    def forward_pass(self):
        prev_a = self.get_prev_a()
        self.z = self.W.dot(prev_a) + self.b
        self.a = self.f(self.z)

    def backpropagate(self):
        prev_a = self.get_prev_a()

        # The output layer gets its error signal from the cost derivative; hidden
        # layers get it from the layer after them.
        if self.output_layer:
            delta = self.model.calc_d_J(self.a)
        else:
            delta = self.next_layer.da

        m = prev_a.shape[1]

        self.dz = delta * self.d_f(self.z)
        self.dW = self.dz.dot(prev_a.T) / m
        self.db = np.sum(self.dz, axis=1, keepdims=True) / m  # average over the batch, matching dW
        self.da = self.W.T.dot(self.dz)

    def learn(self):
        self.W = self.W - self.learning_rate * self.dW
        self.b = self.b - self.learning_rate * self.db
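
# The gradient expressions in Layer.backpropagate follow the standard backprop
# equations. Writing delta for dJ/da of this layer, f for its activation, and m
# for the number of samples in the batch:
#     dz      = delta * f'(z)
#     dW      = dz . a_prev^T / m
#     db      = sum(dz over the batch) / m
#     da_prev = W^T . dz
# da_prev is stored on the layer as self.da; the previous layer reads it as its
# own delta during the backward pass.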

class NeuralNetwork():

    def __init__(self, architecture, input_size, cost_function, train_data=None, train_labels=None, learning_rate=0.001):
        self.learning_rate = learning_rate
        self.architecture = architecture
        self.cost_function = cost_function

        # Create Layers
        self.layers = self.create_layers(architecture, input_size)

        # Data
        self.data = train_data
        self.labels = train_labels

        # Cost Function
        self.J, self.d_J = cost_functions[cost_function]

    def calc_J(self, y_hat):
        return self.J(self.labels, y_hat)

    def calc_d_J(self, y_hat):
        return self.d_J(self.labels, y_hat)

    def calc_accuracy(self, test_data, test_labels, error_func="MSE"):
        self.data = test_data
        self.labels = test_labels

        # Forward pass and get output
        self.forward_pass()
        y_hat = self.layers[-1].a

        if error_func == "MSE":
            # return np.sqrt(np.sum(y_hat-self.labels))/self.labels.shape[1]
            return np.sum((self.labels - y_hat)**2).squeeze() / (y_hat.shape[1] * 2)
        elif error_func == "MAE":
            return np.sum(np.abs(y_hat - self.labels)) / self.labels.shape[1]
        elif error_func == "RMSE":
            return np.sqrt(np.sum((self.labels - y_hat)**2).squeeze() / (y_hat.shape[1] * 2))
        else:
            # Any other value: threshold the outputs at 0.5 and report classification accuracy
            y_pred = np.where(y_hat > 0.5, 1, 0)
            return (y_pred == self.labels).mean()

    def create_layers(self, architecture, input_size):
        layers = []
        for i, config in enumerate(architecture):
            input_dims = input_size if i == 0 else layers[-1].output_dims
            output_dims = config["num_nodes"]
            f, d_f = activation_functions[config["activation"]]
            layer = Layer(self, f, d_f, input_dims, output_dims,
                          input_layer=(i == 0), output_layer=(i == len(architecture) - 1),
                          learning_rate=self.learning_rate)
            if i != 0:
                layers[-1].next_layer = layer
                layer.prev_layer = layers[-1]
            layers.append(layer)

        for layer in layers:
            layer.random_init()

        return layers

    def add_data(self, train_data, train_labels):
        self.data = train_data
        self.labels = train_labels

    def forward_pass(self):
        for layer in self.layers:
            layer.forward_pass()

    def backward_pass(self):
        for layer in reversed(self.layers):
            layer.backpropagate()

    def learn(self):
        for layer in self.layers:
            layer.learn()

    def train(self, epochs):
        history = []
        for i in range(epochs):
            self.forward_pass()
            cost = self.calc_J(self.layers[-1].a)
            history.append(cost)
            # if i % 50 == 0:
            #     print("Cost after iteration %i: %f" % (i, cost))
            self.backward_pass()
            self.learn()

        # Training done. Return history
        return history

# COST FUNCTIONS
def cross_entropy_sigmoid(y, y_hat):
    m = y.shape[1]
    cost = (1. / m) * (-np.dot(y, np.log(y_hat).T) - np.dot(1 - y, np.log(1 - y_hat).T))
    cost = np.squeeze(cost)
    return cost

def cross_entropy_sigmoid_derivative(y, y_hat):
    # Per-sample derivative of the cross-entropy cost with respect to y_hat;
    # the 1/m averaging happens in Layer.backpropagate.
    return -(np.divide(y, y_hat) - np.divide(1 - y, 1 - y_hat))

def mean_squared(y, y_hat):
    return np.sum((y - y_hat)**2).squeeze() / (y_hat.shape[1] * 2)

def d_mean_squared(y, y_hat):
    return (y_hat - y)

cost_functions = {"cross_entropy_sigmoid": (cross_entropy_sigmoid, cross_entropy_sigmoid_derivative),
                  "mean_squared": (mean_squared, d_mean_squared)}
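
# Note on the sigmoid/cross-entropy pairing: when the output activation is a
# sigmoid and the cost is cross_entropy_sigmoid, the output-layer error signal
# simplifies analytically to
#     dz = cross_entropy_sigmoid_derivative(y, y_hat) * d_sigmoid(z) = y_hat - y.
# The code computes the two factors separately, which gives the same result but
# can lose precision when y_hat saturates very close to 0 or 1.
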
# ACTIVATION FUNCTIONS
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def d_sigmoid(x):
    s = sigmoid(x)
    return s * (1 - s)

def relu(x):
    return np.maximum(0, x)

def d_relu(x):
    return np.where(x > 0, 1, 0)

def tanh(x):
    return np.tanh(x)

def d_tanh(x):
    d = tanh(x)
    return 1 - d * d

activation_functions = {"sigmoid": (sigmoid, d_sigmoid), "relu": (relu, d_relu), "tanh": (tanh, d_tanh)}
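
# A small sanity-check helper, included here as a sketch (it is not used elsewhere
# in this file): it compares an analytic derivative against a central finite
# difference at a few random points. The step size, tolerance, and number of test
# points are arbitrary choices; relu is left out of the example below because it
# is not differentiable at 0.
def check_derivative(f, d_f, eps=1e-6, atol=1e-4):
    x = np.random.randn(5)
    numeric = (f(x + eps) - f(x - eps)) / (2 * eps)
    return np.allclose(numeric, d_f(x), atol=atol)

# Example: both of these should return True.
# check_derivative(sigmoid, d_sigmoid)
# check_derivative(tanh, d_tanh)
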
"""# Application on Cancer Dataset"""
# import matplotlib.pyplot as plt
# from sklearn.model_selection import train_test_split
# from sklearn.datasets import load_breast_cancer
# X, y = load_breast_cancer(return_X_y=True)
# y = y.reshape((len(y), 1))
# # Split Data
# train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.2)
# train_X = train_X.T
# test_X = test_X.T
# # Normalize
# mean = np.mean(train_X, axis = 1, keepdims=True)
# std_dev = np.std(train_X, axis = 1, keepdims=True)
# train_X = (train_X - mean)/std_dev
# test_X = (test_X - mean)/std_dev
# train_y = train_y.T
# test_y = test_y.T
# train_X.shape, train_y.shape, test_X.shape, test_y.shape
# description = [{"num_nodes" : 100, "activation" : "relu"},
#                {"num_nodes" : 50, "activation" : "relu"},
#                {"num_nodes" : 1, "activation" : "sigmoid"}]
# model = NeuralNetwork(description,30,"cross_entropy_sigmoid", train_X, train_y, learning_rate=0.001)
# history = model.train(1000)
# plt.plot(history)
# acc = model.calc_accuracy(train_X, train_y, error_func="accuracy")
# print("Accuracy of the model on the training set is = {}".format(acc))
# acc = model.calc_accuracy(test_X, test_y, error_func="accuracy")
# print("Accuracy of the model on the test set is = {}".format(acc))