# cifar10.py
# Forked from titu1994/Keras-ResNeXt
from __future__ import print_function
from __future__ import division
import os
import numpy as np
import sklearn.metrics as metrics
from keras import backend as K
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from resnext import ResNeXt
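
# Training hyperparameters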
batch_size = 100
nb_classes = 10
nb_epoch = 100
img_rows, img_cols = 32, 32
img_channels = 3
img_dim = ((img_channels, img_rows, img_cols) if K.image_data_format() == "channels_first"
           else (img_rows, img_cols, img_channels))
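
# ResNeXt-29 with cardinality 8 and bottleneck width 16 (8 x 16d)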
depth = 29
cardinality = 8
width = 16
model = ResNeXt(img_dim, depth=depth, cardinality=cardinality, width=width, weights=None, classes=nb_classes)
print("Model created")
model.summary()
optimizer = Adam(lr=1e-3) # Using Adam instead of SGD to speed up training
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=["accuracy"])
print("Finished compiling")
print("Building model...")
(trainX, trainY), (testX, testY) = cifar10.load_data()
trainX = trainX.astype('float32')
testX = testX.astype('float32')
trainX /= 255.
testX /= 255.
Y_train = np_utils.to_categorical(trainY, nb_classes)
Y_test = np_utils.to_categorical(testY, nb_classes)
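
# Real-time data augmentation: small rotations, shifts and horizontal flips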
generator = ImageDataGenerator(rotation_range=15,
                               width_shift_range=5. / 32,
                               height_shift_range=5. / 32,
                               horizontal_flip=True)
generator.fit(trainX, seed=0)
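
# Make sure the checkpoint directory exists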
out_dir = "weights/"
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
# Resume from an existing checkpoint if one is present
weights_file = "weights/ResNext-8-64d.h5"
if os.path.exists(weights_file):
    model.load_weights(weights_file)
    print("Model loaded.")
lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1),
                               cooldown=0, patience=10, min_lr=1e-6)
model_checkpoint = ModelCheckpoint(weights_file, monitor="val_acc", save_best_only=True,
                                   save_weights_only=True, mode='auto')
callbacks = [lr_reducer, model_checkpoint]
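
# Train on augmented batches, validating against the raw test set each epoch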
model.fit_generator(generator.flow(trainX, Y_train, batch_size=batch_size),
                    steps_per_epoch=len(trainX) // batch_size,
                    epochs=nb_epoch,
                    callbacks=callbacks,
                    validation_data=(testX, Y_test),
                    verbose=1)
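
# Evaluate the final model on the test set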
yPreds = model.predict(testX)
yPred = np.argmax(yPreds, axis=1)
yTrue = testY.flatten()
accuracy = metrics.accuracy_score(yTrue, yPred) * 100
error = 100 - accuracy
print("Accuracy : ", accuracy)
print("Error : ", error)