# add_new_person.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
import numpy as np
import argparse
import utils.facenet as facenet
import os
import math
import pickle
import csv
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from operator import itemgetter
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
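
# Pipeline overview: (1) align the new person's images with FaceDetector.py,
# (2) compute facenet embeddings for the aligned images, (3) merge the new
# embeddings and labels into the master files under data/embedding/, and
# (4) re-train the chosen classifier on the merged set.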

def main():
    args = parse_args()

    # Align the new person's images before computing embeddings.
    align_command = ('python FaceDetector.py ' + args.input_dir + ' ' + args.align_dir
                     + ' --image_size 182 --margin 44')
    os.system(align_command)
    print("-------- Alignment Completed ----------")

    with tf.Graph().as_default():
        with tf.Session() as sess:
            np.random.seed(666)
            datadir = args.align_dir
            embeddingdir = "data/new_person_embedding/"
            modeldir = args.model_path

            dataset = facenet.get_dataset(datadir)
            paths, labels = facenet.get_image_paths_and_labels(dataset)
            print(labels)
            print('Number of classes: {}'.format(len(dataset)))
            print('Number of images: {}'.format(len(paths)))

            print('Loading feature extraction model')
            facenet.load_model(modeldir)

            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]
            print(embedding_size)

            # Run a forward pass to calculate the embeddings, in batches.
            print('Calculating features for images')
            batch_size = 200
            image_size = 160
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                print('{}/{}'.format(i, nrof_batches_per_epoch))
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            # Store the new embeddings and labels: embedding.csv holds one
            # comma-separated embedding vector per row; label.csv holds
            # (label, image_path) pairs. Neither file is written with a header.
            np.savetxt(embeddingdir + 'embedding.csv', emb_array, delimiter=",")
            with open(embeddingdir + 'label.csv', 'w') as f:
                writer = csv.writer(f)
                writer.writerows(zip(labels, paths))

            # Merge the new embedding and label files into the master files.
            merge_embedding_files("data/embedding/", embeddingdir, "embedding.csv")
            merge_label_files("data/embedding/", embeddingdir, "label.csv")

            # Re-train the classifier on the merged embeddings.
            start = time.time()
            fname = "data/embedding/label.csv"
            labels = pd.read_csv(fname, header=None).values[:, 1]
            # Each label entry is an image path; the person name is the name
            # of the directory that contains the image.
            labels = map(itemgetter(1),
                         map(os.path.split,
                             map(os.path.dirname, labels)))
            labels = list(labels)
            fname = "data/embedding/embedding.csv"
            embeddings = pd.read_csv(fname, header=None).values

            le = LabelEncoder().fit(labels)
            class_names = [name.replace("_", " ") for name in le.classes_]
            labelsNum = le.transform(labels)
            print(class_names)
            print(labelsNum)

            classifier_filename_exp = os.path.expanduser(args.classifier_filename)
            print('Start training classifier')
            if args.classifier == 'SVM':
                model = SVC(kernel='linear', probability=True)
            elif args.classifier == 'KNN':
                model = KNeighborsClassifier(n_neighbors=1)
            elif args.classifier == 'Softmax':
                model = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial')
            elif args.classifier == 'LinearSVC':
                model = LinearSVC(random_state=0, tol=1e-5)
            else:  # 'RF'
                model = RandomForestClassifier(n_estimators=600, max_depth=420, max_features='auto', n_jobs=-1)
            model.fit(embeddings, labelsNum)
            print("Re-training the classifier took {} seconds.".format(time.time() - start))
            print('End training classifier')
            print(le)

            # Save the classifier model together with the class names.
            with open(classifier_filename_exp, 'wb') as outfile:
                pickle.dump((model, class_names), outfile)
            print('Saved classifier model to file "%s"' % classifier_filename_exp)
            print('Good luck')
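
# The tuple pickled above is (model, class_names). The sketch below shows how
# a consumer could load it and classify a single embedding vector; this helper
# is illustrative and is not called anywhere in the original pipeline.
def load_and_predict(classifier_path, emb_vector):
    """Illustrative only: classify one embedding with the saved model."""
    with open(os.path.expanduser(classifier_path), 'rb') as infile:
        model, class_names = pickle.load(infile)
    # scikit-learn classifiers expect a 2-D array: one row per sample.
    pred = model.predict(np.asarray(emb_vector).reshape(1, -1))
    return class_names[pred[0]]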

def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_dir", type=str,
                        help="Path to the data directory containing the new person's images.",
                        default="data/new_person")
    parser.add_argument("--align_dir", type=str,
                        help="Path to the data directory containing the aligned images of the new person.",
                        default="data/aligned_new_person")
    parser.add_argument('--model_path', type=str,
                        help='Path to the embedding model.',
                        default="model/20180402-114759.pb")
    parser.add_argument('--classifier', type=str, choices=['KNN', 'SVM', 'RF', 'LinearSVC', 'Softmax'],
                        help='The type of classifier to use.', default='KNN')
    parser.add_argument('classifier_filename',
                        help='Classifier model file name as a pickle (.pkl) file. '
                             'For training this is the output and for classification this is an input.')
    return parser.parse_args()
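
# Illustrative invocation (the .pkl path is an example, not a project fixture):
#   python add_new_person.py classifier/new_classifier.pkl \
#       --input_dir data/new_person --align_dir data/aligned_new_person \
#       --classifier SVM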

def merge_embedding_files(old_dir, new_dir, file_name):
    """Append the new embeddings to the master embedding file."""
    # embedding.csv is written by np.savetxt without a header row, so every
    # line is data and is appended as-is.
    with open(old_dir + file_name, "a") as fout, open(new_dir + file_name) as f:
        for line in f:
            fout.write(line)

def create_map(old_dir, file_name):
    """Create maps.csv, a file of (label, person_name) pairs, from label.csv."""
    with open(old_dir + file_name, "r") as fin, open(old_dir + "maps.csv", "w") as f_map:
        prev_label = ""
        for line in fin:
            curr_label = line.split(',')[0]  # the numeric label of this row
            if curr_label != prev_label:
                # The person name is the directory containing the image.
                person_name = line.split(',')[1].split('/')[-2]
                f_map.write(curr_label + ',' + person_name + '\n')
                prev_label = curr_label

def merge_label_files(old_dir, new_dir, file_name):
    """Append the new (label, path) rows to the master label file, remapping
    the labels so that they stay consistent with maps.csv."""
    # Create the (label, person_name) map first if it does not exist yet.
    if not os.path.isfile(old_dir + "maps.csv"):
        create_map(old_dir, file_name)
    with open(old_dir + "maps.csv", "r") as f_map:
        maps = f_map.readlines()
    # label.csv is written without a header, so every line is a record.
    with open(old_dir + file_name, "a") as fout, open(new_dir + file_name) as f:
        prev_label = ""
        save_label = ""
        for line in f:
            split_line = line.split(',')
            if split_line[0] == prev_label:
                # Same person as the previous row: reuse the remapped label.
                fout.write(str(save_label) + ',' + split_line[1])
                continue
            person_name = split_line[1].split('/')[-2]
            label = [s for s in maps if person_name in s]
            if not label:  # a new person: assign the next free label
                label = int(maps[-1].split(',')[0]) + 1
                maps.append(str(label) + ',' + person_name + '\n')
            else:  # a known person: reuse the existing label
                label = label[0].split(',')[0]
            fout.write(str(label) + ',' + split_line[1])
            prev_label = split_line[0]
            save_label = label
    # Write the (possibly extended) map back to file.
    with open(old_dir + "maps.csv", "w") as f_map:
        f_map.writelines(maps)
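
# For reference, maps.csv rows have the form "<numeric_label>,<person_name>",
# e.g. "0,Person_A" (name illustrative): a returning person keeps their
# existing label and a brand-new person receives max_label + 1.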

if __name__ == '__main__':
    main()