
Commit be99512

Course 4 Week 4: Neural Style Transfer, Face Recognition
committed last week
1 parent 4198432

17 files changed, +3926 -0 lines

Course 4/Week 4/Face Recognition/Face+Recognition+for+the+Happy+House+-+v3.ipynb

Lines changed: 779 additions & 0 deletions
Large diffs are not rendered by default.

Course 4/Week 4/Face Recognition/Face+Recognition+for+the+Happy+House+-+v3.py

Lines changed: 476 additions & 0 deletions
Large diffs are not rendered by default.
Lines changed: 197 additions & 0 deletions
@@ -0,0 +1,197 @@
#### PART OF THIS CODE IS USING CODE FROM VICTOR SY WANG: https://github.com/iwantooxxoox/Keras-OpenFace/blob/master/utils.py ####

import tensorflow as tf
import numpy as np
import os
import cv2
from numpy import genfromtxt
from keras import backend as K  # used below to run variable initializers in the active session
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
import h5py
import matplotlib.pyplot as plt


_FLOATX = 'float32'

def variable(value, dtype=_FLOATX, name=None):
    v = tf.Variable(np.asarray(value, dtype=dtype), name=name)
    K.get_session().run(v.initializer)  # initialize in the active Keras/TF session
    return v

def shape(x):
    return x.get_shape()

def square(x):
    return tf.square(x)

def zeros(shape, dtype=_FLOATX, name=None):
    return variable(np.zeros(shape), dtype, name)

def concatenate(tensors, axis=-1):
    # local helper; TF 1.x signature is tf.concat(values, axis)
    if axis < 0:
        axis = axis % len(tensors[0].get_shape())
    return tf.concat(tensors, axis)

def LRN2D(x):
    # local response normalization, as in the original GoogLeNet/OpenFace
    return tf.nn.lrn(x, alpha=1e-4, beta=0.75)
def conv2d_bn(x,
              layer=None,
              cv1_out=None,
              cv1_filter=(1, 1),
              cv1_strides=(1, 1),
              cv2_out=None,
              cv2_filter=(3, 3),
              cv2_strides=(1, 1),
              padding=None):
    # Conv -> BatchNorm -> ReLU block, optionally followed by zero-padding
    # and a second Conv -> BatchNorm -> ReLU (used for the Inception branches).
    num = '' if cv2_out is None else '1'
    tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides, data_format='channels_first', name=layer+'_conv'+num)(x)
    tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer+'_bn'+num)(tensor)
    tensor = Activation('relu')(tensor)
    if padding is None:
        return tensor
    tensor = ZeroPadding2D(padding=padding, data_format='channels_first')(tensor)
    if cv2_out is None:
        return tensor
    tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides, data_format='channels_first', name=layer+'_conv'+'2')(tensor)
    tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer+'_bn'+'2')(tensor)
    tensor = Activation('relu')(tensor)
    return tensor
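For orientation, here is a minimal sketch of how conv2d_bn builds one Inception branch; the Input shape and layer name are illustrative, chosen to match conv_shape['inception_3a_3x3_conv1'] below:

X = Input(shape=(192, 12, 12))  # channels-first tensor with 192 channels (illustrative)
branch = conv2d_bn(X,
                   layer='inception_3a_3x3',
                   cv1_out=96, cv1_filter=(1, 1),   # 1x1 bottleneck -> 'inception_3a_3x3_conv1'
                   cv2_out=128, cv2_filter=(3, 3),  # 3x3 conv       -> 'inception_3a_3x3_conv2'
                   padding=(1, 1))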
WEIGHTS = [
    'conv1', 'bn1', 'conv2', 'bn2', 'conv3', 'bn3',
    'inception_3a_1x1_conv', 'inception_3a_1x1_bn',
    'inception_3a_pool_conv', 'inception_3a_pool_bn',
    'inception_3a_5x5_conv1', 'inception_3a_5x5_conv2', 'inception_3a_5x5_bn1', 'inception_3a_5x5_bn2',
    'inception_3a_3x3_conv1', 'inception_3a_3x3_conv2', 'inception_3a_3x3_bn1', 'inception_3a_3x3_bn2',
    'inception_3b_3x3_conv1', 'inception_3b_3x3_conv2', 'inception_3b_3x3_bn1', 'inception_3b_3x3_bn2',
    'inception_3b_5x5_conv1', 'inception_3b_5x5_conv2', 'inception_3b_5x5_bn1', 'inception_3b_5x5_bn2',
    'inception_3b_pool_conv', 'inception_3b_pool_bn',
    'inception_3b_1x1_conv', 'inception_3b_1x1_bn',
    'inception_3c_3x3_conv1', 'inception_3c_3x3_conv2', 'inception_3c_3x3_bn1', 'inception_3c_3x3_bn2',
    'inception_3c_5x5_conv1', 'inception_3c_5x5_conv2', 'inception_3c_5x5_bn1', 'inception_3c_5x5_bn2',
    'inception_4a_3x3_conv1', 'inception_4a_3x3_conv2', 'inception_4a_3x3_bn1', 'inception_4a_3x3_bn2',
    'inception_4a_5x5_conv1', 'inception_4a_5x5_conv2', 'inception_4a_5x5_bn1', 'inception_4a_5x5_bn2',
    'inception_4a_pool_conv', 'inception_4a_pool_bn',
    'inception_4a_1x1_conv', 'inception_4a_1x1_bn',
    'inception_4e_3x3_conv1', 'inception_4e_3x3_conv2', 'inception_4e_3x3_bn1', 'inception_4e_3x3_bn2',
    'inception_4e_5x5_conv1', 'inception_4e_5x5_conv2', 'inception_4e_5x5_bn1', 'inception_4e_5x5_bn2',
    'inception_5a_3x3_conv1', 'inception_5a_3x3_conv2', 'inception_5a_3x3_bn1', 'inception_5a_3x3_bn2',
    'inception_5a_pool_conv', 'inception_5a_pool_bn',
    'inception_5a_1x1_conv', 'inception_5a_1x1_bn',
    'inception_5b_3x3_conv1', 'inception_5b_3x3_conv2', 'inception_5b_3x3_bn1', 'inception_5b_3x3_bn2',
    'inception_5b_pool_conv', 'inception_5b_pool_bn',
    'inception_5b_1x1_conv', 'inception_5b_1x1_bn',
    'dense_layer'
]
conv_shape = {
    'conv1': [64, 3, 7, 7],
    'conv2': [64, 64, 1, 1],
    'conv3': [192, 64, 3, 3],
    'inception_3a_1x1_conv': [64, 192, 1, 1],
    'inception_3a_pool_conv': [32, 192, 1, 1],
    'inception_3a_5x5_conv1': [16, 192, 1, 1],
    'inception_3a_5x5_conv2': [32, 16, 5, 5],
    'inception_3a_3x3_conv1': [96, 192, 1, 1],
    'inception_3a_3x3_conv2': [128, 96, 3, 3],
    'inception_3b_3x3_conv1': [96, 256, 1, 1],
    'inception_3b_3x3_conv2': [128, 96, 3, 3],
    'inception_3b_5x5_conv1': [32, 256, 1, 1],
    'inception_3b_5x5_conv2': [64, 32, 5, 5],
    'inception_3b_pool_conv': [64, 256, 1, 1],
    'inception_3b_1x1_conv': [64, 256, 1, 1],
    'inception_3c_3x3_conv1': [128, 320, 1, 1],
    'inception_3c_3x3_conv2': [256, 128, 3, 3],
    'inception_3c_5x5_conv1': [32, 320, 1, 1],
    'inception_3c_5x5_conv2': [64, 32, 5, 5],
    'inception_4a_3x3_conv1': [96, 640, 1, 1],
    'inception_4a_3x3_conv2': [192, 96, 3, 3],
    'inception_4a_5x5_conv1': [32, 640, 1, 1],
    'inception_4a_5x5_conv2': [64, 32, 5, 5],
    'inception_4a_pool_conv': [128, 640, 1, 1],
    'inception_4a_1x1_conv': [256, 640, 1, 1],
    'inception_4e_3x3_conv1': [160, 640, 1, 1],
    'inception_4e_3x3_conv2': [256, 160, 3, 3],
    'inception_4e_5x5_conv1': [64, 640, 1, 1],
    'inception_4e_5x5_conv2': [128, 64, 5, 5],
    'inception_5a_3x3_conv1': [96, 1024, 1, 1],
    'inception_5a_3x3_conv2': [384, 96, 3, 3],
    'inception_5a_pool_conv': [96, 1024, 1, 1],
    'inception_5a_1x1_conv': [256, 1024, 1, 1],
    'inception_5b_3x3_conv1': [96, 736, 1, 1],
    'inception_5b_3x3_conv2': [384, 96, 3, 3],
    'inception_5b_pool_conv': [96, 736, 1, 1],
    'inception_5b_1x1_conv': [256, 736, 1, 1],
}
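A sanity-check sketch of these shapes (it assumes ./weights/conv1_w.csv exists, as load_weights below expects): the raw OpenFace export is ordered (out, in, H, W), and the reshape plus transpose yields Keras's (H, W, in, out) kernel layout:

w = genfromtxt('./weights/conv1_w.csv', delimiter=',', dtype=None)
w = np.transpose(np.reshape(w, conv_shape['conv1']), (2, 3, 1, 0))
assert w.shape == (7, 7, 3, 64)  # conv_shape['conv1'] == [64, 3, 7, 7]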
def load_weights_from_FaceNet(FRmodel):
    # Load weights from csv files (which were exported from the OpenFace torch model)
    weights = WEIGHTS
    weights_dict = load_weights()

    # Set layer weights of the model
    for name in weights:
        if FRmodel.get_layer(name) is not None:
            FRmodel.get_layer(name).set_weights(weights_dict[name])
def load_weights():
    # Set weights path
    dirPath = './weights'
    fileNames = filter(lambda f: not f.startswith('.'), os.listdir(dirPath))
    paths = {}
    weights_dict = {}

    for n in fileNames:
        paths[n.replace('.csv', '')] = dirPath + '/' + n

    for name in WEIGHTS:
        if 'conv' in name:
            # conv kernels: reshape to (out, in, H, W), then transpose to Keras's (H, W, in, out)
            conv_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
            conv_w = np.reshape(conv_w, conv_shape[name])
            conv_w = np.transpose(conv_w, (2, 3, 1, 0))
            conv_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
            weights_dict[name] = [conv_w, conv_b]
        elif 'bn' in name:
            # batch norm: gamma (w), beta (b), running mean (m), running variance (v)
            bn_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
            bn_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
            bn_m = genfromtxt(paths[name + '_m'], delimiter=',', dtype=None)
            bn_v = genfromtxt(paths[name + '_v'], delimiter=',', dtype=None)
            weights_dict[name] = [bn_w, bn_b, bn_m, bn_v]
        elif 'dense' in name:
            # final dense layer mapping 736 features to the 128-d embedding
            dense_w = genfromtxt(dirPath+'/dense_w.csv', delimiter=',', dtype=None)
            dense_w = np.reshape(dense_w, (128, 736))
            dense_w = np.transpose(dense_w, (1, 0))
            dense_b = genfromtxt(dirPath+'/dense_b.csv', delimiter=',', dtype=None)
            weights_dict[name] = [dense_w, dense_b]

    return weights_dict
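A hedged usage sketch: faceRecoModel comes from the course's inception_blocks_v2 module and is not defined in this file, and a populated ./weights directory of CSVs is assumed:

from inception_blocks_v2 import faceRecoModel  # assumed course module

FRmodel = faceRecoModel(input_shape=(3, 96, 96))  # channels-first 96x96 RGB input
load_weights_from_FaceNet(FRmodel)                # fills the layers from ./weights/*.csv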
def load_dataset():
    train_dataset = h5py.File('datasets/train_happy.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_happy.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
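A minimal sketch of the usual preprocessing after load_dataset (the normalization and transpose follow the course convention; the variable names are illustrative):

X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
X_train = X_train_orig / 255.  # scale pixel values to [0, 1]
Y_train = Y_train_orig.T       # (1, m) -> (m, 1)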
def img_to_encoding(image_path, model):
    img1 = cv2.imread(image_path, 1)  # read as BGR
    img = img1[...,::-1]              # BGR -> RGB
    img = np.around(np.transpose(img, (2,0,1))/255.0, decimals=12)  # channels-first, scaled to [0, 1]
    x_train = np.array([img])         # add a batch dimension
    embedding = model.predict_on_batch(x_train)
    return embedding
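To show how the encodings are typically consumed, here is a hedged sketch of face verification by L2 distance; the database entry, image path, and the 0.7 threshold are illustrative assumptions, with FRmodel built as in the sketch above:

database = {}
database["younes"] = img_to_encoding("images/younes.jpg", FRmodel)  # hypothetical enrollment image

def verify(image_path, identity, database, model):
    encoding = img_to_encoding(image_path, model)
    dist = np.linalg.norm(encoding - database[identity])  # distance between 128-d embeddings
    return dist < 0.7, dist  # illustrative threshold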
