05.cifar10_cnn.py
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
batch_size = 32
nb_classes = 10
nb_epoch = 20
kernel_size = (3, 3)
data_augmentation = True
# input image dimensions
img_rows, img_cols = 32, 32
# the CIFAR10 images are RGB
img_channels = 3
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
# normalize pixel values to the [0, 1] range
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(32, kernel_size, padding='same',
                        input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Convolution2D(32, kernel_size))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, kernel_size, padding='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, kernel_size))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.summary()
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
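# Optional sketch (not in the original script): a ModelCheckpoint callback can
# save the best weights seen on the validation set. The file name
# 'cifar10_cnn_best.h5' is an arbitrary choice for illustration; pass
# callbacks=[checkpoint] to the fit / fit_generator call below to enable it.
from keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint('cifar10_cnn_best.h5',
                             monitor='val_loss',
                             save_best_only=True)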
if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(X_train, Y_train,
              batch_size=batch_size,
              epochs=nb_epoch,
              validation_data=(X_test, Y_test),
              shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and real-time data augmentation
    datagen = ImageDataGenerator(
        featurewise_center=False,             # set input mean to 0 over the dataset
        samplewise_center=False,              # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by the std of the dataset
        samplewise_std_normalization=False,   # divide each input by its std
        zca_whitening=False,                  # apply ZCA whitening
        rotation_range=0,                     # randomly rotate images in this range (degrees, 0 to 180)
        width_shift_range=0.1,                # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,               # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,                 # randomly flip images horizontally
        vertical_flip=False)                  # randomly flip images vertically
    # Compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)
    # Fit the model on the batches generated by datagen.flow()
    model.fit_generator(datagen.flow(X_train, Y_train,
                                     batch_size=batch_size),
                        steps_per_epoch=X_train.shape[0] // batch_size,
                        epochs=nb_epoch,
                        validation_data=(X_test, Y_test))
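# Optional sketch (not in the original script): report test-set performance and
# persist the trained model. model.evaluate returns [loss, accuracy] given the
# metrics configured above; the file name 'cifar10_cnn.h5' is an arbitrary choice.
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model.save('cifar10_cnn.h5')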