# face_mask_detection.py
# Import necessary libraries
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input
from tensorflow.keras.layers import AveragePooling2D, Flatten, Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
# Set the training hyperparameters
INIT_LR = 1e-4  # initial learning rate
EPOCHS = 20     # number of training epochs
BS = 32         # batch size
# Load and preprocess the dataset
data = []
labels = []
# Load your dataset of "with_mask" and "without_mask" images and append each
# preprocessed image array and its corresponding label to the data and labels
# lists; one possible way to do this is sketched below
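# --- Hedged sketch: one possible way to fill data/labels. The directory layout
# --- "dataset/with_mask" and "dataset/without_mask" is an assumption, not part
# --- of the original script; adapt the path and folder names to your own dataset.
import os

DATASET_DIR = "dataset"  # hypothetical root folder with one subfolder per class
for category in ("with_mask", "without_mask"):
    category_dir = os.path.join(DATASET_DIR, category)
    for filename in os.listdir(category_dir):
        # Load each image at the 224x224 size expected by MobileNetV2, convert
        # it to an array, and apply the MobileNetV2 preprocessing
        image = load_img(os.path.join(category_dir, filename), target_size=(224, 224))
        image = img_to_array(image)
        image = preprocess_input(image)
        data.append(image)
        labels.append(category)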
# Convert the data and labels to NumPy arrays
data = np.array(data, dtype="float32")
labels = np.array(labels)
# Perform one-hot encoding on the labels
# (LabelBinarizer yields a single 0/1 column for the two classes,
# which to_categorical then expands into two one-hot columns)
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
# Split the data into training and testing sets
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.20, stratify=labels, random_state=42)
# Data augmentation for training images
aug = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest"
)
# Load MobileNetV2 model and customize it
baseModel = MobileNetV2(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)))
# Construct the head of the model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dense(2, activation="softmax")(headModel)
# Combine the base model and head model
model = Model(inputs=baseModel.input, outputs=headModel)
# Freeze layers in the base model
for layer in baseModel.layers:
    layer.trainable = False
# Compile the model (older Keras versions of Adam also accepted decay=INIT_LR / EPOCHS;
# recent versions drop that argument, so only the learning rate is set here)
opt = Adam(learning_rate=INIT_LR)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
# Train the model
H = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS
)
# Make predictions on the test set
predIdxs = model.predict(testX, batch_size=BS)
# For each image in the test set, find the index of the label with the largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# Show a classification report
print(classification_report(testY.argmax(axis=1), predIdxs, target_names=lb.classes_))