ops.py
import tensorflow as tf


# weight and bias wrappers
def weight_variable(shape):
    """
    Create a weight variable with appropriate initialization
    :param shape: weight shape
    :return: initialized weight variable
    """
    initer = tf.truncated_normal_initializer(stddev=0.01)
    return tf.get_variable('W',
                           dtype=tf.float32,
                           shape=shape,
                           initializer=initer)


def bias_variable(shape):
    """
    Create a bias variable with appropriate initialization
    :param shape: bias variable shape
    :return: initialized bias variable
    """
    initial = tf.constant(0., shape=shape, dtype=tf.float32)
    return tf.get_variable('b',
                           dtype=tf.float32,
                           initializer=initial)


def fc_layer(x, num_units, name, use_relu=True):
    """
    Create a fully-connected layer
    :param x: input from previous layer
    :param num_units: number of hidden units in the fully-connected layer
    :param name: layer name
    :param use_relu: boolean to add ReLU non-linearity (or not)
    :return: The output array
    """
    with tf.variable_scope(name):
        in_dim = x.get_shape()[1]
        W = weight_variable(shape=[in_dim, num_units])
        tf.summary.histogram('weight', W)
        b = bias_variable(shape=[num_units])
        tf.summary.histogram('bias', b)
        layer = tf.matmul(x, W)
        layer += b
        if use_relu:
            layer = tf.nn.relu(layer)
        return layer
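

# Example usage (a minimal sketch, assuming a TF1.x graph; the placeholder
# and sizes below are illustrative assumptions, not part of ops.py):
#   x = tf.placeholder(tf.float32, shape=[None, 784])
#   h1 = fc_layer(x, num_units=128, name='FC1', use_relu=True)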


def conv_layer(x, filter_size, num_filters, stride, name):
    """
    Create a 2D convolution layer
    :param x: input from previous layer
    :param filter_size: size of each filter
    :param num_filters: number of filters (or output feature maps)
    :param stride: filter stride
    :param name: layer name
    :return: The output array
    """
    with tf.variable_scope(name):
        num_in_channel = x.get_shape().as_list()[-1]
        shape = [filter_size, filter_size, num_in_channel, num_filters]
        W = weight_variable(shape=shape)
        tf.summary.histogram('weight', W)
        b = bias_variable(shape=[num_filters])
        tf.summary.histogram('bias', b)
        layer = tf.nn.conv2d(x, W,
                             strides=[1, stride, stride, 1],
                             padding="SAME")
        layer += b
        return tf.nn.relu(layer)
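

# Example usage (a minimal sketch; the NHWC input shape and sizes are
# illustrative assumptions, not part of ops.py):
#   images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
#   conv1 = conv_layer(images, filter_size=5, num_filters=16, stride=1,
#                      name='Conv1')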


def flatten_layer(layer):
    """
    Flattens the output of the convolutional layer to be fed into a fully-connected layer
    :param layer: input array
    :return: flattened array of shape [batch_size, num_features]
    """
    with tf.variable_scope('Flatten_layer'):
        layer_shape = layer.get_shape()
        # Multiply the height, width, and channel dimensions together,
        # keeping the batch dimension intact.
        num_features = layer_shape[1:4].num_elements()
        layer_flat = tf.reshape(layer, [-1, num_features])
        return layer_flat
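

# Example (illustrative shapes, not from ops.py): a conv/pool output of shape
# [None, 14, 14, 16] flattens to [None, 14 * 14 * 16] = [None, 3136].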


def max_pool(x, ksize, stride, name):
    """
    Create a max pooling layer
    :param x: input to max-pooling layer
    :param ksize: size of the max-pooling filter
    :param stride: stride of the max-pooling filter
    :param name: layer name
    :return: The output array
    """
    return tf.nn.max_pool(x,
                          ksize=[1, ksize, ksize, 1],
                          strides=[1, stride, stride, 1],
                          padding="SAME",
                          name=name)
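

# A minimal smoke-test sketch (illustrative only, assuming TF1.x graph mode;
# the input shape, layer sizes, and scope names below are hypothetical and
# not part of the original ops.py):
if __name__ == '__main__':
    # Build a tiny conv -> pool -> flatten -> fc graph on fake 28x28 inputs.
    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='X')
    conv1 = conv_layer(x, filter_size=5, num_filters=16, stride=1, name='Conv1')
    pool1 = max_pool(conv1, ksize=2, stride=2, name='Pool1')
    flat = flatten_layer(pool1)  # [None, 14 * 14 * 16]
    logits = fc_layer(flat, num_units=10, name='FC1', use_relu=False)
    print(logits)  # Tensor of shape (?, 10) if the graph wired up correctly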