lab-11-4-mnist_cnn_layers.py (forked from hunkim/DeepLearningZeroToAll)
# Lab 11 MNIST and Deep learning CNN
import tensorflow as tf
# import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset
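# mnist.train.images is a [55000, 784] float32 array with pixel values
# scaled to [0, 1]; with one_hot=True, labels are [*, 10] one-hot vectors.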

# hyperparameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100


class Model:

    def __init__(self, sess, name):
        self.sess = sess
        self.name = name
        self._build_net()

    def _build_net(self):
        with tf.variable_scope(self.name):
            # dropout: tf.layers.dropout takes the DROP rate (1 - keep_prob),
            # so a keep_prob of 0.7~0.5 during training corresponds to
            # rate=0.3~0.5 below. Dropout is disabled automatically when
            # training=False, so no manual switch to 1 is needed for testing.
            self.training = tf.placeholder(tf.bool)

            # input place holders
            self.X = tf.placeholder(tf.float32, [None, 784])

            # img 28x28x1 (black/white), Input Layer
            # (-1 lets TensorFlow infer the batch dimension)
            X_img = tf.reshape(self.X, [-1, 28, 28, 1])
            self.Y = tf.placeholder(tf.float32, [None, 10])

            # Convolutional Layer #1
            conv1 = tf.layers.conv2d(inputs=X_img, filters=32, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)
            # Pooling Layer #1
            pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2],
                                            padding="SAME", strides=2)
            dropout1 = tf.layers.dropout(inputs=pool1,
                                         rate=0.3, training=self.training)

            # Convolutional Layer #2 and Pooling Layer #2
            conv2 = tf.layers.conv2d(inputs=dropout1, filters=64, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)
            pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2],
                                            padding="SAME", strides=2)
            dropout2 = tf.layers.dropout(inputs=pool2,
                                         rate=0.3, training=self.training)

            # Convolutional Layer #3 and Pooling Layer #3
            conv3 = tf.layers.conv2d(inputs=dropout2, filters=128, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)
            pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2],
                                            padding="SAME", strides=2)
            dropout3 = tf.layers.dropout(inputs=pool3,
                                         rate=0.3, training=self.training)
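
            # With SAME padding, each 2x2/stride-2 pooling halves the
            # spatial size, rounding up: 28 -> 14 -> 7 -> ceil(7/2) = 4,
            # so dropout3 has shape [-1, 4, 4, 128] and flattens to 128*4*4.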
            # Dense Layer with ReLU
            flat = tf.reshape(dropout3, [-1, 128 * 4 * 4])
            dense4 = tf.layers.dense(inputs=flat,
                                     units=625, activation=tf.nn.relu)
            dropout4 = tf.layers.dropout(inputs=dense4,
                                         rate=0.5, training=self.training)

            # Logits (no activation) Layer: L5 Final FC 625 inputs -> 10 outputs
            self.logits = tf.layers.dense(inputs=dropout4, units=10)

        # define cost/loss & optimizer
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            logits=self.logits, labels=self.Y))
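        # (softmax_cross_entropy_with_logits applies the softmax itself,
        # which is why the logits layer above has no activation)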
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=learning_rate).minimize(self.cost)

        correct_prediction = tf.equal(
            tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def predict(self, x_test, training=False):
        return self.sess.run(self.logits,
                             feed_dict={self.X: x_test, self.training: training})

    def get_accuracy(self, x_test, y_test, training=False):
        return self.sess.run(self.accuracy,
                             feed_dict={self.X: x_test,
                                        self.Y: y_test, self.training: training})

    def train(self, x_data, y_data, training=True):
        return self.sess.run([self.cost, self.optimizer], feed_dict={
            self.X: x_data, self.Y: y_data, self.training: training})
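

# Each Model builds its variables under tf.variable_scope(self.name), so
# several instances with distinct names (e.g. "m1", "m2" for an ensemble)
# can coexist in one session without variable collisions.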

# initialize
sess = tf.Session()
m1 = Model(sess, "m1")

sess.run(tf.global_variables_initializer())

print('Learning Started!')

# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        c, _ = m1.train(batch_xs, batch_ys)
        avg_cost += c / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy
print('Accuracy:', m1.get_accuracy(mnist.test.images, mnist.test.labels))
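
# Note (not part of the original lab): running all 10,000 test images
# through the conv layers in one sess.run can exhaust memory on small
# GPUs. If that happens, the accuracy can be averaged over batches:
#
#     test_size = len(mnist.test.labels)
#     acc = 0
#     for i in range(0, test_size, batch_size):
#         xs = mnist.test.images[i:i + batch_size]
#         ys = mnist.test.labels[i:i + batch_size]
#         acc += m1.get_accuracy(xs, ys) * len(xs)
#     print('Batched accuracy:', acc / test_size)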