@@ -104,6 +104,15 @@ To load a model, you have to provide the corresponding manifest.json file as well a
Assuming the models reside in **public/models**:

+ ``` javascript
+ await faceapi.loadFaceDetectionModel('/models')
+ // accordingly for the other models:
+ // await faceapi.loadFaceLandmarkModel('/models')
+ // await faceapi.loadFaceRecognitionModel('/models')
+ ```
+
+ As an alternative, you can also create instances of the neural nets:
+
``` javascript
const net = new faceapi.FaceDetectionNet()
// accordingly for the other models:
@@ -118,7 +127,7 @@ await net.load('/models/face_detection_model-weights_manifest.json')
await net.load('/models')
```

- Alternatively you can load the weights as a Float32Array (in case you want to use the uncompressed models):
+ Using instances, you can also load the weights as a Float32Array (in case you want to use the uncompressed models):

``` javascript
// using fetch
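// A sketch of how the rest of this example could look: fetch the raw weights,
// wrap them in a Float32Array and pass them to net.load(). The weights filename
// below is hypothetical - use the uncompressed weight file you are actually serving.
const res = await fetch('/models/face_detection_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
await net.load(weights)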
@@ -145,7 +154,7 @@ const maxResults = 10

// inputs can be html canvas, img or video element or their ids ...
const myImg = document.getElementById('myImg')
- const detections = await detectionNet.locateFaces(myImg, minConfidence, maxResults)
+ const detections = await faceapi.locateFaces(myImg, minConfidence, maxResults)
```

Draw the detected faces to a canvas:
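A minimal sketch of the drawing step, assuming an overlay canvas element; the `overlay` id and the use of `forSize` to rescale the detections to the displayed image size are illustrative assumptions here:

``` javascript
// assumed overlay canvas matching the displayed image size
const canvas = document.getElementById('overlay')
canvas.width = myImg.width
canvas.height = myImg.height

// rescale the detections to the display size before drawing them
const detectionsForSize = detections.map(det => det.forSize(myImg.width, myImg.height))
faceapi.drawDetection(canvas, detectionsForSize, { withScore: false })
```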
@@ -162,7 +171,7 @@ faceapi.drawDetection(canvas, detectionsForSize, { withScore: false })
You can also obtain the tensors of the unfiltered bounding boxes and scores for each image in the batch (tensors have to be disposed manually):

``` javascript
- const { boxes, scores } = detectionNet.forward('myImg')
+ const { boxes, scores } = net.forward('myImg')
```

<a name="usage-face-recognition"></a>
@@ -173,8 +182,8 @@ Compute and compare the descriptors of two face images:

``` javascript
// inputs can be html canvas, img or video element or their ids ...
- const descriptor1 = await recognitionNet.computeFaceDescriptor('myImg')
- const descriptor2 = await recognitionNet.computeFaceDescriptor(document.getElementById('myCanvas'))
+ const descriptor1 = await faceapi.computeFaceDescriptor('myImg')
+ const descriptor2 = await faceapi.computeFaceDescriptor(document.getElementById('myCanvas'))
const distance = faceapi.euclidianDistance(descriptor1, descriptor2)

if (distance < 0.6)
@@ -183,16 +192,10 @@ else
  console.log('no match')
```

- You can also get the face descriptor data synchronously:
-
- ``` javascript
- const desc = recognitionNet.computeFaceDescriptorSync('myImg')
- ```
-
Or simply obtain the tensor (tensor has to be disposed manually):

``` javascript
- const t = recognitionNet.forward('myImg')
+ const t = net.forward('myImg')
```
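For example, once the descriptor tensor is no longer needed, free its memory (a minimal sketch using the `t` obtained above):

``` javascript
// free the tensor's memory when you are done with it
t.dispose()
```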

<a name="usage-face-landmark-detection"></a>
@@ -204,7 +207,7 @@ Detect face landmarks:
``` javascript
// inputs can be html canvas, img or video element or their ids ...
const myImg = document.getElementById('myImg')
- const landmarks = await faceLandmarkNet.detectLandmarks(myImg)
+ const landmarks = await faceapi.detectLandmarks(myImg)
```

Draw the detected face landmarks to a canvas:
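A minimal sketch of what this can look like, again assuming an overlay canvas; `drawLandmarks`, the `drawLines` option and the `forSize` rescaling are assumptions in line with the detection drawing example above:

``` javascript
// assumed overlay canvas matching the displayed image size
const canvas = document.getElementById('overlay')
canvas.width = myImg.width
canvas.height = myImg.height

// rescale the landmark positions to the display size and draw them
const landmarksForSize = landmarks.forSize(myImg.width, myImg.height)
faceapi.drawLandmarks(canvas, landmarksForSize, { drawLines: true })
```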
@@ -236,11 +239,11 @@ const rightEyeBrow = landmarks.getRightEyeBrow()
Compute the Face Landmarks for Detected Faces:

``` javascript
- const detections = await detectionNet.locateFaces(input)
+ const detections = await faceapi.locateFaces(input)

// get the face tensors from the image (have to be disposed manually)
const faceTensors = await faceapi.extractFaceTensors(input, detections)
- const landmarksByFace = await Promise.all(faceTensors.map(t => faceLandmarkNet.detectLandmarks(t)))
+ const landmarksByFace = await Promise.all(faceTensors.map(t => faceapi.detectLandmarks(t)))

// free memory for face image tensors after we computed their landmarks
faceTensors.forEach(t => t.dispose())
@@ -250,19 +253,31 @@ faceTensors.forEach(t => t.dispose())

### Full Face Detection and Recognition Pipeline

- After face detection has been performed, I would recommend to align the bounding boxes of the detected faces before passing them to the face recognition net, which will make the computed face descriptor much more accurate. You can easily align the faces from their face landmark positions as shown in the following example:
+ After face detection has been performed, I would recommend aligning the bounding boxes of the detected faces before passing them to the face recognition net, which will make the computed face descriptors much more accurate. Fortunately, the api can do this for you under the hood. You can obtain the full face descriptions (location, landmarks and descriptor) of each face in an input image as follows:
+
+ ``` javascript
+ const fullFaceDescriptions = await faceapi.allFaces(input, minConfidence)
+
+ const fullFaceDescription0 = fullFaceDescriptions[0]
+ console.log(fullFaceDescription0.detection) // bounding box & score
+ console.log(fullFaceDescription0.landmarks) // 68 point face landmarks
+ console.log(fullFaceDescription0.descriptor) // face descriptor
+
+ ```
+
+ You can also do everything manually as shown in the following:

``` javascript
// first detect the face locations
- const detections = await detectionNet.locateFaces(input)
+ const detections = await faceapi.locateFaces(input, minConfidence)

// get the face tensors from the image (have to be disposed manually)
const faceTensors = (await faceapi.extractFaceTensors(input, detections))

// detect landmarks and get the aligned face image bounding boxes
const alignedFaceBoxes = await Promise.all(faceTensors.map(
  async (faceTensor, i) => {
-   const faceLandmarks = await landmarkNet.detectLandmarks(faceTensor)
+   const faceLandmarks = await faceapi.detectLandmarks(faceTensor)
    return faceLandmarks.align(detections[i])
  }
))
@@ -275,7 +290,7 @@ const alignedFaceTensors = (await faceapi.extractFaceTensors(input, alignedFaceB

// compute the face descriptors from the aligned face images
const descriptors = await Promise.all(alignedFaceTensors.map(
-   faceTensor => recognitionNet.computeFaceDescriptor(faceTensor)
+   faceTensor => faceapi.computeFaceDescriptor(faceTensor)
))

// free memory for face image tensors after we computed their descriptors
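// (continuation sketch: dispose of the aligned face tensors, mirroring the
// faceTensors.forEach(t => t.dispose()) step shown in the landmarks example above)
alignedFaceTensors.forEach(t => t.dispose())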