@@ -3,7 +3,6 @@
 <head>
   <script src="face-api.js"></script>
   <script src="js/commons.js"></script>
-  <script src="js/drawing.js"></script>
   <script src="js/faceDetectionControls.js"></script>
   <script src="js/imageSelectionControls.js"></script>
   <link rel="stylesheet" href="styles.css">
@@ -139,34 +138,45 @@
 
 <script>
 
-  window.net = new faceapi.AgeGenderNet()
-
   async function updateResults() {
     if (!isFaceDetectionModelLoaded()) {
       return
     }
 
     const inputImgEl = $('#inputImg').get(0)
     const options = getFaceDetectorOptions()
 
-    const result = await faceapi.detectSingleFace(inputImgEl, options)
-
-    if (!result) return
-
-    const face = (await faceapi.extractFaces(inputImgEl, [result]))[0]
-    const { age, gender, genderProbability } = await window.net.predictAgeAndGender(face)
-
-    console.log('age', age)
-    console.log('gender', gender, genderProbability)
+    const results = await faceapi.detectAllFaces(inputImgEl, options)
+      // compute face landmarks to align faces for better accuracy
+      .withFaceLandmarks()
+      .withAgeAndGender()
+
+    const canvas = $('#overlay').get(0)
+    faceapi.matchDimensions(canvas, inputImgEl)
+
+    const resizedResults = faceapi.resizeResults(results, inputImgEl)
+    faceapi.draw.drawDetections(canvas, resizedResults)
+
+    resizedResults.forEach(result => {
+      const { age, gender, genderProbability } = result
+      new faceapi.draw.DrawTextField(
+        [
+          `${faceapi.round(age, 0)} years`,
+          `${gender} (${faceapi.round(genderProbability)})`
+        ],
+        result.detection.box.bottomLeft
+      ).draw(canvas)
+    })
   }
 
   async function run() {
     // load face detection and face expression recognition models
     await changeFaceDetector(SSD_MOBILENETV1)
+    await faceapi.loadFaceLandmarkModel('/')
 
+    // TODO
     const weights = await faceapi.fetchNetWeights('tmp/age_gender.weights')
-    console.log(weights.length)
-    await window.net.load(weights)
+    await faceapi.nets.ageGenderNet.load(weights)
 
     // start processing image
     updateResults()
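
For comparison with the removed detectSingleFace path, the same composed task API introduced above also covers the single-face case. The sketch below is not part of the commit: it assumes the model weights are served from a hypothetical '/models' URI, that the page contains an <img id="inputImg"> element, and the helper name logAgeAndGenderOfLargestFace is made up for illustration.

  // Minimal single-face sketch using the same chained API as the updated example code.
  async function logAgeAndGenderOfLargestFace() {
    // load the detector, landmark and age/gender models (assumed to be served from '/models')
    await faceapi.nets.ssdMobilenetv1.loadFromUri('/models')
    await faceapi.nets.faceLandmark68Net.loadFromUri('/models')
    await faceapi.nets.ageGenderNet.loadFromUri('/models')

    const input = document.getElementById('inputImg')

    // detectSingleFace picks the detection with the highest score, or returns undefined
    const result = await faceapi
      .detectSingleFace(input, new faceapi.SsdMobilenetv1Options({ minConfidence: 0.5 }))
      .withFaceLandmarks()
      .withAgeAndGender()

    if (!result) {
      console.log('no face detected')
      return
    }

    const { age, gender, genderProbability } = result
    console.log(`${Math.round(age)} years, ${gender} (${genderProbability.toFixed(2)})`)
  }

As in the updated example, the .withFaceLandmarks() step aligns the face crop before the age and gender net runs, which is what the added comment in the diff refers to.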