Skip to content

Commit d3ddbb5

Browse files
add batch face recognition example + batch processing in allFaces can be enabled via flag
1 parent 1e2d261 commit d3ddbb5

File tree

7 files changed

+174
-18
lines changed

7 files changed

+174
-18
lines changed

examples/public/commons.js

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,10 @@ function renderNavBar(navbarId, exampleUri) {
117117
{
118118
uri: 'batch_face_landmarks',
119119
name: 'Batch Face Landmarks'
120+
},
121+
{
122+
uri: 'batch_face_recognition',
123+
name: 'Batch Face Recognition'
120124
}
121125
]
122126

examples/public/styles.css

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,10 @@
4949
margin: 20px;
5050
}
5151

52+
.button-sm {
53+
padding: 0 10px !important;
54+
}
55+
5256
#github-link {
5357
display: flex !important;
5458
justify-content: center;

examples/server.js

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,7 @@ app.get('/detect_and_draw_landmarks', (req, res) => res.sendFile(path.join(views
2525
app.get('/face_alignment', (req, res) => res.sendFile(path.join(viewsDir, 'faceAlignment.html')))
2626
app.get('/detect_and_recognize_faces', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndRecognizeFaces.html')))
2727
app.get('/batch_face_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceLandmarks.html')))
28-
29-
28+
app.get('/batch_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceRecognition.html')))
3029

3130
app.post('/fetch_external_image', async (req, res) => {
3231
const { imageUrl } = req.body
examples/views/batchFaceRecognition.html

Lines changed: 135 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,135 @@
1+
<!DOCTYPE html>
2+
<html>
3+
<head>
4+
<script src="face-api.js"></script>
5+
<script src="commons.js"></script>
6+
<link rel="stylesheet" href="styles.css">
7+
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
8+
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
9+
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
10+
</head>
11+
<body>
12+
<div id="navbar"></div>
13+
<div class="center-content page-container">
14+
<div>
15+
<div class="progress" id="loader">
16+
<div class="indeterminate"></div>
17+
</div>
18+
<div class="row side-by-side">
19+
<div class="row">
20+
<label for="timeNoBatch">Time for processing each face separately:</label>
21+
<input disabled value="-" id="timeNoBatch" type="text" class="bold"/>
22+
</div>
23+
<div class="row">
24+
<label for="timeBatch">Time for processing in Batch:</label>
25+
<input disabled value="-" id="timeBatch" type="text" class="bold"/>
26+
</div>
27+
</div>
28+
<div class="row side-by-side">
29+
<div>
30+
<label for="numImages">Num Images:</label>
31+
<input id="numImages" type="text" class="bold" value="32"/>
32+
</div>
33+
<button
34+
class="waves-effect waves-light btn"
35+
onclick="measureTimingsAndDisplay()"
36+
>
37+
Ok
38+
</button>
39+
</div>
40+
<div class="row side-by-side">
41+
<div class="center-content">
42+
<div id="faceContainer"></div>
43+
</div>
44+
</div>
45+
</div>
46+
</div>
47+
48+
<script>
49+
let images = []
50+
let trainDescriptorsByClass = []
51+
let descriptorsByFace = []
52+
let numImages = 32
53+
let maxDistance = 0.6
54+
55+
function onNumImagesChanged(e) {
56+
const val = parseInt(e.target.value) || 32
57+
numImages = Math.min(Math.max(val, 0), 32)
58+
e.target.value = numImages
59+
}
60+
61+
function displayTimeStats(timeNoBatch, timeBatch) {
62+
$('#timeNoBatch').val(`${timeNoBatch} ms`)
63+
$('#timeBatch').val(`${timeBatch} ms`)
64+
}
65+
66+
function drawFaceRecognitionCanvas(img, descriptor) {
67+
const canvas = faceapi.createCanvasFromMedia(img)
68+
$('#faceContainer').append(canvas)
69+
70+
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
71+
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
72+
73+
const x = 20, y = canvas.height - 20
74+
faceapi.drawText(
75+
canvas.getContext('2d'),
76+
x,
77+
y,
78+
text,
79+
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
80+
)
81+
}
82+
83+
async function runComputeFaceDescriptors(useBatchInput) {
84+
const ts = Date.now()
85+
descriptorsByFace = useBatchInput
86+
? await faceapi.computeFaceDescriptor(images.slice(0, numImages))
87+
: await Promise.all(images.slice(0, numImages).map(img => faceapi.computeFaceDescriptor(img)))
88+
const time = Date.now() - ts
89+
return time
90+
}
91+
92+
async function measureTimings() {
93+
const timeNoBatch = await runComputeFaceDescriptors(false)
94+
const timeBatch = await runComputeFaceDescriptors(true)
95+
return { timeNoBatch, timeBatch }
96+
}
97+
98+
async function measureTimingsAndDisplay() {
99+
const { timeNoBatch, timeBatch } = await measureTimings()
100+
displayTimeStats(timeNoBatch, timeBatch)
101+
102+
$('#faceContainer').empty()
103+
descriptorsByFace.forEach((descriptor, i) => drawFaceRecognitionCanvas(images[i], descriptor))
104+
}
105+
106+
async function run() {
107+
await faceapi.loadFaceRecognitionModel('/')
108+
trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
109+
$('#loader').hide()
110+
111+
const imgUris = classes
112+
// skip images with idx 1, as they are used as reference data
113+
.map(clazz => Array.from(Array(4), (_, idx) => getFaceImageUri(clazz, idx + 2)))
114+
.reduce((flat, arr) => flat.concat(arr))
115+
116+
images = await Promise.all(imgUris.map(
117+
async uri => faceapi.bufferToImage(await fetchImage(uri))
118+
))
119+
120+
// warmup
121+
await measureTimings()
122+
// run
123+
measureTimingsAndDisplay()
124+
}
125+
126+
$(document).ready(function() {
127+
$('#numImages').on('change', onNumImagesChanged)
128+
renderNavBar('#navbar', 'batch_face_recognition')
129+
run()
130+
})
131+
132+
</script>
133+
134+
</body>
135+
</html>

examples/views/detectAndRecognizeFaces.html

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -30,20 +30,24 @@
3030
>
3131
Ok
3232
</button>
33+
<p>
34+
<input type="checkbox" id="useBatchProcessing" onchange="onChangeUseBatchProcessing(event)" />
35+
<label for="useBatchProcessing">Use Batch Processing</label>
36+
</p>
3337
</div>
3438
<div class="row side-by-side">
3539
<div class="row">
3640
<label for="minConfidence">Min Confidence:</label>
3741
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
3842
</div>
3943
<button
40-
class="waves-effect waves-light btn"
44+
class="waves-effect waves-light btn button-sm"
4145
onclick="onDecreaseMinConfidence()"
4246
>
4347
<i class="material-icons left">-</i>
4448
</button>
4549
<button
46-
class="waves-effect waves-light btn"
50+
class="waves-effect waves-light btn button-sm"
4751
onclick="onIncreaseMinConfidence()"
4852
>
4953
<i class="material-icons left">+</i>
@@ -53,13 +57,13 @@
5357
<input disabled value="0.6" id="maxDistance" type="text" class="bold">
5458
</div>
5559
<button
56-
class="waves-effect waves-light btn"
60+
class="waves-effect waves-light btn button-sm"
5761
onclick="onDecreaseMaxDistance()"
5862
>
5963
<i class="material-icons left">-</i>
6064
</button>
6165
<button
62-
class="waves-effect waves-light btn"
66+
class="waves-effect waves-light btn button-sm"
6367
onclick="onIncreaseMaxDistance()"
6468
>
6569
<i class="material-icons left">+</i>
@@ -70,9 +74,14 @@
7074
<script>
7175
let maxDistance = 0.6
7276
let minConfidence = 0.7
77+
let useBatchProcessing = false
7378
let detectionNet, recognitionNet, landmarkNet
7479
let trainDescriptorsByClass = []
7580

81+
function onChangeUseBatchProcessing(e) {
82+
useBatchProcessing = $(e.target).prop('checked')
83+
}
84+
7685
function onIncreaseMinConfidence() {
7786
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
7887
$('#minConfidence').val(minConfidence)
@@ -110,7 +119,7 @@
110119
canvas.width = width
111120
canvas.height = height
112121

113-
const fullFaceDescriptions = (await faceapi.allFaces(inputImgEl, minConfidence))
122+
const fullFaceDescriptions = (await faceapi.allFaces(inputImgEl, minConfidence, useBatchProcessing))
114123
.map(fd => fd.forSize(width, height))
115124

116125
fullFaceDescriptions.forEach(({ detection, descriptor }) => {

src/allFacesFactory.ts

Lines changed: 14 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -13,18 +13,19 @@ export function allFacesFactory(
1313
) {
1414
return async function(
1515
input: TNetInput,
16-
minConfidence: number
16+
minConfidence: number,
17+
useBatchProcessing: boolean = false
1718
): Promise<FullFaceDescription[]> {
1819

1920
const detections = await detectionNet.locateFaces(input, minConfidence)
2021

2122
const faceTensors = await extractFaceTensors(input, detections)
22-
/**
23-
const faceLandmarksByFace = await Promise.all(faceTensors.map(
24-
faceTensor => landmarkNet.detectLandmarks(faceTensor)
25-
)) as FaceLandmarks[]
26-
*/
27-
const faceLandmarksByFace = await landmarkNet.detectLandmarks(faceTensors) as FaceLandmarks[]
23+
24+
const faceLandmarksByFace = useBatchProcessing
25+
? await landmarkNet.detectLandmarks(faceTensors) as FaceLandmarks[]
26+
: await Promise.all(faceTensors.map(
27+
faceTensor => landmarkNet.detectLandmarks(faceTensor)
28+
)) as FaceLandmarks[]
2829

2930
faceTensors.forEach(t => t.dispose())
3031

@@ -33,9 +34,12 @@ export function allFacesFactory(
3334
)
3435
const alignedFaceTensors = await extractFaceTensors(input, alignedFaceBoxes)
3536

36-
const descriptors = await Promise.all(alignedFaceTensors.map(
37-
faceTensor => recognitionNet.computeFaceDescriptor(faceTensor)
38-
)) as Float32Array[]
37+
const descriptors = useBatchProcessing
38+
? await recognitionNet.computeFaceDescriptor(alignedFaceTensors) as Float32Array[]
39+
: await Promise.all(alignedFaceTensors.map(
40+
faceTensor => recognitionNet.computeFaceDescriptor(faceTensor)
41+
)) as Float32Array[]
42+
3943
alignedFaceTensors.forEach(t => t.dispose())
4044

4145
return detections.map((detection, i) =>

src/globalApi.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,8 @@ export function computeFaceDescriptor(
5656

5757
export const allFaces: (
5858
input: tf.Tensor | NetInput | TNetInput,
59-
minConfidence: number
59+
minConfidence: number,
60+
useBatchProcessing?: boolean
6061
) => Promise<FullFaceDescription[]> = allFacesFactory(
6162
detectionNet,
6263
landmarkNet,

0 commit comments

Comments
 (0)