CNN for mod.py
# import the necessary packages
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import sys
sys.path.append('../modulation')
import numpy as np
import pickle
import keras
import keras.models as models
import keras.optimizers
import matplotlib.pyplot as plt
from keras.layers import Reshape, Dropout, Flatten, Dense, Activation
from keras.layers import Conv2D, ZeroPadding2D
# load the RadioML 2016.10a data (the dict was pickled under Python 2,
# so decode its byte strings with latin1)
Xd = pickle.load(open("RML2016.10a_dict.dat", 'rb'), encoding='latin1')
# the keys are (modulation, SNR) pairs; collect the sorted unique SNRs and modulations
snrs, mods = map(lambda j: sorted(set(map(lambda x: x[j], Xd.keys()))), [1, 0])
X = []
lbl = []
for mod in mods:
    for snr in snrs:
        X.append(Xd[(mod, snr)])
        for i in range(Xd[(mod, snr)].shape[0]):
            lbl.append((mod, snr))
X = np.vstack(X)
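# quick sanity check of the loaded data (a sketch; assumes the standard
# RML2016.10a layout, where each dict value holds I/Q samples of shape (N, 2, 128))
print(X.shape)               # e.g. (220000, 2, 128)
print(len(mods), len(snrs))  # e.g. 11 modulations x 20 SNR levels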
# split the data into training and test sets
# fix the random seed for reproducibility
np.random.seed(1567)
n_example = X.shape[0]
# we use 80% of the input data for training
n_train = int(n_example * 0.8)
# randomly choose the training signals
train_idx = np.random.choice(range(0, n_example), size=n_train, replace=False)
# the rest will be used for testing
test_idx = list(set(range(0, n_example)) - set(train_idx))
X_train = X[train_idx]
X_test = X[test_idx]
# function for one-hot encoding a vector of class indices
def to_onehot(vec):
    vec = list(vec)  # accept any iterable (e.g. a map object in Python 3)
    vec_hot = np.zeros([len(vec), max(vec) + 1])
    vec_hot[np.arange(len(vec)), vec] = 1
    return vec_hot
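# illustrative example of the encoder:
# to_onehot([0, 2, 1]) -> [[1., 0., 0.],
#                          [0., 0., 1.],
#                          [0., 1., 0.]]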
Y_train = to_onehot(map(lambda x: mods.index(lbl[x][0]), train_idx))
Y_test = to_onehot(map(lambda x: mods.index(lbl[x][0]), test_idx))
in_shp = list(X_train.shape[1:])
print(X_train.shape, in_shp)
# types of modulation
classes = mods
# The CNN architecture consists of 3 conv layers:
# first conv layer of 256 filters of size 1*3
# second conv layer of 256 filters of size 1*3
# third conv layer of 80 filters of size 1*3
# followed by a dense layer of 128 units
# and a second dense layer for the output, with one unit per class
# building the model
dr = 0.5  # dropout rate of 50%
model = models.Sequential()
# add a trailing channel axis: TensorFlow defaults to channels_last
model.add(Reshape(in_shp + [1], input_shape=in_shp))
model.add(ZeroPadding2D((0, 2)))
model.add(Conv2D(256, (1, 3), padding='valid', activation='relu', name='conv1', kernel_initializer='glorot_uniform'))
model.add(Dropout(dr))
model.add(ZeroPadding2D((0, 2)))
model.add(Conv2D(256, (1, 3), padding='valid', activation='relu', name='conv2', kernel_initializer='glorot_uniform'))
model.add(Dropout(dr))
model.add(ZeroPadding2D((0, 2)))
model.add(Conv2D(80, (1, 3), padding='valid', activation='relu', name='conv3', kernel_initializer='glorot_uniform'))
model.add(Dropout(dr))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_normal', name='dense1'))
model.add(Dropout(dr))
model.add(Dense(len(classes), kernel_initializer='he_normal', name='dense2'))
model.add(Activation('softmax'))
model.add(Reshape([len(classes)]))
myadam = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.99, epsilon=1e-08)
model.compile(loss='categorical_crossentropy', optimizer=myadam, metrics=['accuracy'])
model.summary()
# set up some params
nb_epoch = 100     # number of epochs to train for
batch_size = 1024  # training batch size
# perform training ...
# - call the main training loop in Keras for our network + dataset,
#   checkpointing the best weights (by validation loss) to disk so that
#   they can be re-loaded afterwards
filepath = 'CNN.wts.h5'
history = model.fit(X_train,
                    Y_train,
                    batch_size=batch_size,
                    epochs=nb_epoch,
                    verbose=2,
                    validation_data=(X_test, Y_test),
                    callbacks=[keras.callbacks.ModelCheckpoint(filepath,
                                                               monitor='val_loss',
                                                               save_best_only=True,
                                                               save_weights_only=True)])
# re-load the best weights once training is finished
model.load_weights(filepath)
# evaluate the performance of the neural network
score = model.evaluate(X_test, Y_test, verbose=0, batch_size=batch_size)
print(score)
# show loss curves
plt.figure()
plt.title('Training performance')
plt.plot(history.epoch, history.history['loss'], label='train loss')
plt.plot(history.epoch, history.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('%s_training_performance.png' % 'CNN')
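# Optionally, the accuracy curves recorded by fit() can be plotted the same
# way (a sketch; older Keras versions store these under 'acc'/'val_acc'
# instead of 'accuracy'/'val_accuracy'):
plt.figure()
plt.title('Classification accuracy')
plt.plot(history.epoch, history.history['accuracy'], label='train accuracy')
plt.plot(history.epoch, history.history['val_accuracy'], label='val accuracy')
plt.legend()
plt.savefig('CNN_accuracy.png')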