# gradient_descent.py
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
import numpy as np
import argparse

def sigmoid_activation(x):
    # compute the sigmoid activation value for a given input
    return 1.0 / (1 + np.exp(-x))
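
# note: the sigmoid squashes any real-valued input into the range (0, 1), so its
# output can be read as an estimated probability of the positive class -- this is
# what makes the 0.5 threshold in predict() below a sensible decision boundary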

def predict(X, W):
    # take the dot product between our features and weight matrix
    preds = sigmoid_activation(X.dot(W))

    # apply a step function to threshold the outputs to binary class labels
    preds[preds <= 0.5] = 0
    preds[preds > 0] = 1

    return preds
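
# example (hypothetical shapes): if X has shape (N, 3) -- two features plus the
# bias column appended below -- and W has shape (3, 1), predict(X, W) returns an
# (N, 1) column vector of 0/1 class labels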

def next_batch(X, y, batch_size):
    # loop over our dataset `X` in mini-batches, yielding a tuple of the
    # current batched data and labels
    for i in np.arange(0, X.shape[0], batch_size):
        yield (X[i:i + batch_size], y[i:i + batch_size])
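
# example: with the 500-point training split and batch_size=32 used below,
# next_batch yields 15 full batches of 32 samples plus one final batch of 20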

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--epochs", type=int, default=100, help="# of epochs")
ap.add_argument("-a", "--alpha", type=float, default=0.01, help="learning rate")
ap.add_argument("-b", "--batch-size", type=int, default=32, help="size of SGD mini-batches")
args = vars(ap.parse_args())
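
# example usage from the command line (all flags are optional and fall back to
# the defaults above):
#   python gradient_descent.py --epochs 100 --alpha 0.01 --batch-size 32
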
# generate a 2-class classification problem with 1,000 data points
# where each data point is a 2d feature vector.
(X, y) = make_blobs(n_samples=1000, n_features=2, centers=2, cluster_std=1.5, random_state=1)
y = y.reshape((y.shape[0], 1))
# insert a column of 1's as the last entry in the feature matrix -- this little trick allows us to
# treat the bias as a trainable parameter within the weight matrix
X = np.c_[X, np.ones((X.shape[0]))]
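# e.g. a data point [x0, x1] becomes [x0, x1, 1], so X.dot(W) computes
# w0 * x0 + w1 * x1 + w2, with the last weight w2 acting as the bias
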
# partition the data into training and testing splits using 50%
# of the data for training and the remaining 50% for testing
(trainX, testX, trainY, testY) = train_test_split(X, y, test_size=0.5, random_state=42)
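# with 1,000 points and a 50/50 split, trainX and testX each have shape (500, 3)
# and trainY and testY each have shape (500, 1)
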
# initialize weight matrix and list of losses
print("[INFO] training...")
W = np.random.randn(X.shape[1], 1)
losses = []
# loop over the desired number of epochs
for epoch in np.arange(0, args["epochs"]):
    # initialize the total loss for the epoch
    epoch_loss = []

    # loop over our training data in mini-batches
    for (batchX, batchY) in next_batch(trainX, trainY, args["batch_size"]):
        # take the dot product between our current batch of features "batchX"
        # and the weight matrix "W", then pass this value through our sigmoid
        # activation function, giving us our predictions on the batch
        preds = sigmoid_activation(batchX.dot(W))

        # now that we have our predictions, we need to determine the "error",
        # which is the difference between our predictions and the true values
        error = preds - batchY
        epoch_loss.append(np.sum(error ** 2))
        # the gradient descent update is the dot product between our current
        # batch and the error of the predictions
        gradient = batchX.T.dot(error)
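
        # note: batchX.T.dot(error) is exactly the gradient of the binary
        # cross-entropy loss with respect to W for a sigmoid output; read as a
        # squared-error gradient (the loss we track above), it drops the
        # sigmoid-derivative factor preds * (1 - preds), a common simplification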

        # in the update stage, all we need to do is "nudge" the weight matrix in
        # the negative direction of the gradient (hence the term "gradient
        # descent") by taking a small step towards a set of "more optimal"
        # parameters
        W += -args["alpha"] * gradient
    # update our loss history by taking the average loss across all batches
    loss = np.average(epoch_loss)
    losses.append(loss)

    # a check to see if an update should be displayed
    if epoch == 0 or (epoch + 1) % 5 == 0:
        print("[INFO] epoch={}, loss={:.7f}".format(int(epoch + 1), loss))
print("[INFO] evaluating...")
preds = predict(testX, W)
print(classification_report(testY, preds))
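# classification_report prints per-class precision, recall, f1-score, and support
# for the held-out test split
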
# plot the (testing) classification data
plt.style.use("ggplot")
plt.figure()
plt.title("Data")
plt.scatter(testX[:, 0], testX[:, 1], c=testY[:,0], marker="o", s=30)
# construct a figure that plots the loss over time
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, args["epochs"]), losses)
plt.title("Training Loss")
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.show()