-
Notifications
You must be signed in to change notification settings - Fork 21
/
Copy pathvisualize_outputs_keras_model1.py
103 lines (83 loc) · 2.87 KB
/
visualize_outputs_keras_model1.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPool2D
from keras.models import Sequential
# Build the CNN whose intermediate ReLU activations will be visualized:
# four Conv2D -> ReLU -> MaxPool stages, then a small dense classifier.
model = Sequential()
_relu_layers = []
for stage_idx, n_filters in enumerate((64, 128, 256, 512)):
    conv_kwargs = {'kernel_size': (3, 3), 'padding': 'same'}
    if stage_idx == 0:
        # Only the first layer declares the input: single-channel 64x64 images.
        conv_kwargs['input_shape'] = (64, 64, 1)
    model.add(Conv2D(n_filters, **conv_kwargs))
    relu = Activation('relu')
    model.add(relu)
    _relu_layers.append(relu)
    model.add(MaxPool2D())

# Keep a named handle on each stage's activation layer so any of them can
# be passed to layer_to_visualize() further down in the script.
conv1out, conv2out, conv3out, conv4out = _relu_layers
maxpool1out = model.layers[2]  # first pooling layer, kept for parity with the activations

# Classifier head: 3-way softmax over the rectangle shape classes.
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(3, activation='softmax'))

model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# Pretrained weights are expected alongside this script.
model.load_weights('test.h5')
def get_image():
    """Generate one random binary 64x64x1 image containing a single rectangle.

    One of three shape classes is drawn uniformly at random and its name is
    printed: 'equal' (square), 'width', or 'long'.

    Returns:
        np.ndarray of shape (64, 64, 1) with values 0.0 / 1.0.
    """
    img_size = 64
    min_rect_size = 5
    max_rect_size = 50
    img = np.zeros((img_size, img_size, 1))
    num = np.random.choice(range(3))  # pick one of the 3 classes uniformly
    if num == 0:  # equal
        xsize = np.random.randint(min_rect_size, max_rect_size)
        ysize = xsize
        print('Shape: {}'.format('equal'))
    elif num == 1:  # width
        # Integer floor-division: randint requires integer bounds in modern NumPy.
        ysize = np.random.randint(max_rect_size // 2, max_rect_size)
        ratio = np.random.choice([1.5, 2, 3])
        xsize = int(ysize / ratio)
        print('Shape: {}'.format('width'))
    else:  # num == 2: long — `else` guarantees xsize/ysize are always bound
        xsize = np.random.randint(max_rect_size // 2, max_rect_size)
        ratio = np.random.choice([1.5, 2, 3])
        ysize = int(xsize / ratio)
        print('Shape: {}'.format('long'))
    # Random top-left corner such that the rectangle fits inside the image.
    x = np.random.randint(0, img_size - xsize)
    y = np.random.randint(0, img_size - ysize)
    img[x:x + xsize, y:y + ysize, 0] = 1.
    return img
# Draw one random rectangle image and display it, then prepend a batch
# axis so the array matches the model's (batch, H, W, C) input layout.
img_to_visualize = get_image()
plt.imshow(np.squeeze(img_to_visualize, axis=-1))
plt.show()
img_to_visualize = np.expand_dims(img_to_visualize, axis=0)
def layer_to_visualize(layer):
    """Run img_to_visualize through the model and plot every feature map
    produced by `layer` in a roughly square subplot grid.

    Args:
        layer: a Keras layer already added to `model` (e.g. conv1out).
    """
    # Backend function mapping (learning_phase, model input) -> layer output.
    inputs = [K.learning_phase()] + model.inputs
    _convout1_f = K.function(inputs, [layer.output])

    def convout1_f(X):
        # The leading 0 selects the test phase (disables training-only
        # behavior such as Dropout).
        return _convout1_f([0] + [X])

    convolutions = convout1_f(img_to_visualize)
    convolutions = np.squeeze(convolutions)
    print('Shape of conv:', convolutions.shape)

    num = convolutions.shape[2]  # number of filters (channels-last layout)
    n = int(np.ceil(np.sqrt(num)))  # side length of the subplot grid

    # Visualization of each filter of the layer
    fig = plt.figure()
    for i in range(num):
        ax = fig.add_subplot(n, n, i + 1)
        ax.imshow(convolutions[..., i], cmap='gray')
    plt.show()
    # Bug fix: Figure has no close() method (the original `fig.close()`
    # raised AttributeError); release the figure via pyplot instead.
    plt.close(fig)
# Choose which stage's activation maps to plot; uncomment one of the
# lines below to inspect a deeper layer instead of the first.
layer_to_visualize(conv1out)
# layer_to_visualize(conv2out)
# layer_to_visualize(conv3out)
# layer_to_visualize(conv4out)