Skip to content

Commit 397c05a

Browse files
finished test case restructuring + scripts for running specific test suites
1 parent 3c3c83d commit 397c05a

24 files changed

+819
-411
lines changed

karma.conf.js

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,13 +14,28 @@ const dataFiles = [
1414
nocache: false
1515
}))
1616

17+
const exclude = process.env.UUT
18+
? [
19+
'dom',
20+
'faceLandmarkNet',
21+
'faceRecognitionNet',
22+
'ssdMobilenetv1',
23+
'tinyFaceDetector',
24+
'mtcnn',
25+
'tinyYolov2'
26+
]
27+
.filter(ex => ex !== process.env.UUT)
28+
.map(ex => `test/tests/${ex}/*.ts`)
29+
: []
30+
1731
module.exports = function(config) {
1832
config.set({
1933
frameworks: ['jasmine', 'karma-typescript'],
2034
files: [
2135
'src/**/*.ts',
2236
'test/**/*.ts'
2337
].concat(dataFiles),
38+
exclude,
2439
preprocessors: {
2540
'**/*.ts': ['karma-typescript']
2641
},

package.json

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,13 @@
1111
"tsc": "tsc",
1212
"tsc-es6": "tsc --p tsconfig.es6.json",
1313
"build": "npm run rollup && npm run rollup-min && npm run tsc && npm run tsc-es6",
14-
"test": "karma start"
14+
"test": "karma start",
15+
"test-facelandmarknets": "set UUT=faceLandmarkNet&& karma start",
16+
"test-facerecognitionnet": "set UUT=faceRecognitionNet&& karma start",
17+
"test-ssdmobilenetv1": "set UUT=ssdMobilenetv1&& karma start",
18+
"test-tinyfacedetector": "set UUT=tinyFaceDetector&& karma start",
19+
"test-mtcnn": "set UUT=mtcnn&& karma start",
20+
"test-tinyyolov2": "set UUT=tinyYolov2&& karma start"
1521
},
1622
"keywords": [
1723
"face",

test/expectDetectionResults.ts renamed to test/expectFaceDetections.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
import { IRect } from '../src';
22
import { FaceDetection } from '../src/classes/FaceDetection';
3-
import { expectRectClose, sortBoxes, sortFaceDetections } from './utils';
3+
import { expectRectClose, sortFaceDetections } from './utils';
44

5-
export function expectDetectionResults(
5+
export function expectFaceDetections(
66
results: FaceDetection[],
77
allExpectedFaceDetections: IRect[],
88
expectedScores: number[],

test/tests/faceLandmarkNet/faceLandmark68Net.test.ts

Lines changed: 0 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -32,38 +32,6 @@ describe('faceLandmark68Net', () => {
3232
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRect.json')
3333
})
3434

35-
describeWithNets('uncompressed weights', { withFaceLandmark68Net: { quantized: false } }, ({ faceLandmark68Net }) => {
36-
37-
it('computes face landmarks for squared input', async () => {
38-
const { width, height } = imgEl1
39-
40-
const result = await faceLandmark68Net.detectLandmarks(imgEl1) as FaceLandmarks68
41-
expect(result.imageWidth).toEqual(width)
42-
expect(result.imageHeight).toEqual(height)
43-
expect(result.shift.x).toEqual(0)
44-
expect(result.shift.y).toEqual(0)
45-
result.positions.forEach((pt, i) => {
46-
const { x, y } = faceLandmarkPositions1[i]
47-
expectPointClose(pt, { x, y }, 1)
48-
})
49-
})
50-
51-
it('computes face landmarks for rectangular input', async () => {
52-
const { width, height } = imgElRect
53-
54-
const result = await faceLandmark68Net.detectLandmarks(imgElRect) as FaceLandmarks68
55-
expect(result.imageWidth).toEqual(width)
56-
expect(result.imageHeight).toEqual(height)
57-
expect(result.shift.x).toEqual(0)
58-
expect(result.shift.y).toEqual(0)
59-
result.positions.forEach((pt, i) => {
60-
const { x, y } = faceLandmarkPositionsRect[i]
61-
expectPointClose(pt, { x, y }, 2)
62-
})
63-
})
64-
65-
})
66-
6735
describeWithNets('quantized weights', { withFaceLandmark68Net: { quantized: true } }, ({ faceLandmark68Net }) => {
6836

6937
it('computes face landmarks for squared input', async () => {
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
import { fetchImage, fetchJson, Point } from '../../../src';
2+
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
3+
import { describeWithNets, expectPointClose } from '../../utils';
4+
5+
describe('faceLandmark68Net, uncompressed', () => {
6+
7+
let imgEl1: HTMLImageElement
8+
let imgElRect: HTMLImageElement
9+
let faceLandmarkPositions1: Point[]
10+
let faceLandmarkPositionsRect: Point[]
11+
12+
beforeAll(async () => {
13+
imgEl1 = await fetchImage('base/test/images/face1.png')
14+
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
15+
faceLandmarkPositions1 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions1.json')
16+
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRect.json')
17+
})
18+
19+
describeWithNets('uncompressed weights', { withFaceLandmark68Net: { quantized: false } }, ({ faceLandmark68Net }) => {
20+
21+
it('computes face landmarks for squared input', async () => {
22+
const { width, height } = imgEl1
23+
24+
const result = await faceLandmark68Net.detectLandmarks(imgEl1) as FaceLandmarks68
25+
expect(result.imageWidth).toEqual(width)
26+
expect(result.imageHeight).toEqual(height)
27+
expect(result.shift.x).toEqual(0)
28+
expect(result.shift.y).toEqual(0)
29+
result.positions.forEach((pt, i) => {
30+
const { x, y } = faceLandmarkPositions1[i]
31+
expectPointClose(pt, { x, y }, 1)
32+
})
33+
})
34+
35+
it('computes face landmarks for rectangular input', async () => {
36+
const { width, height } = imgElRect
37+
38+
const result = await faceLandmark68Net.detectLandmarks(imgElRect) as FaceLandmarks68
39+
expect(result.imageWidth).toEqual(width)
40+
expect(result.imageHeight).toEqual(height)
41+
expect(result.shift.x).toEqual(0)
42+
expect(result.shift.y).toEqual(0)
43+
result.positions.forEach((pt, i) => {
44+
const { x, y } = faceLandmarkPositionsRect[i]
45+
expectPointClose(pt, { x, y }, 2)
46+
})
47+
})
48+
49+
})
50+
51+
})
52+

test/tests/faceLandmarkNet/faceLandmark68TinyNet.test.ts

Lines changed: 1 addition & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -32,38 +32,6 @@ describe('faceLandmark68TinyNet', () => {
3232
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRectTiny.json')
3333
})
3434

35-
describeWithNets('uncompressed weights', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => {
36-
37-
it('computes face landmarks for squared input', async () => {
38-
const { width, height } = imgEl1
39-
40-
const result = await faceLandmark68TinyNet.detectLandmarks(imgEl1) as FaceLandmarks68
41-
expect(result.imageWidth).toEqual(width)
42-
expect(result.imageHeight).toEqual(height)
43-
expect(result.shift.x).toEqual(0)
44-
expect(result.shift.y).toEqual(0)
45-
result.positions.forEach((pt, i) => {
46-
const { x, y } = faceLandmarkPositions1[i]
47-
expectPointClose(pt, { x, y }, 5)
48-
})
49-
})
50-
51-
it('computes face landmarks for rectangular input', async () => {
52-
const { width, height } = imgElRect
53-
54-
const result = await faceLandmark68TinyNet.detectLandmarks(imgElRect) as FaceLandmarks68
55-
expect(result.imageWidth).toEqual(width)
56-
expect(result.imageHeight).toEqual(height)
57-
expect(result.shift.x).toEqual(0)
58-
expect(result.shift.y).toEqual(0)
59-
result.positions.forEach((pt, i) => {
60-
const { x, y } = faceLandmarkPositionsRect[i]
61-
expectPointClose(pt, { x, y }, 5)
62-
})
63-
})
64-
65-
})
66-
6735
describeWithNets('quantized weights', { withFaceLandmark68TinyNet: { quantized: true } }, ({ faceLandmark68TinyNet }) => {
6836

6937
it('computes face landmarks for squared input', async () => {
@@ -96,7 +64,7 @@ describe('faceLandmark68TinyNet', () => {
9664

9765
})
9866

99-
describeWithNets('batch inputs', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => {
67+
describeWithNets('batch inputs', { withFaceLandmark68TinyNet: { quantized: true } }, ({ faceLandmark68TinyNet }) => {
10068

10169
it('computes face landmarks for batch of image elements', async () => {
10270
const inputs = [imgEl1, imgEl2, imgElRect]
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
import { fetchImage, fetchJson, Point } from '../../../src';
2+
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
3+
import { describeWithNets, expectPointClose } from '../../utils';
4+
5+
describe('faceLandmark68TinyNet, uncompressed', () => {
6+
7+
let imgEl1: HTMLImageElement
8+
let imgElRect: HTMLImageElement
9+
let faceLandmarkPositions1: Point[]
10+
let faceLandmarkPositionsRect: Point[]
11+
12+
beforeAll(async () => {
13+
imgEl1 = await fetchImage('base/test/images/face1.png')
14+
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
15+
faceLandmarkPositions1 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions1Tiny.json')
16+
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRectTiny.json')
17+
})
18+
19+
describeWithNets('uncompressed weights', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => {
20+
21+
it('computes face landmarks for squared input', async () => {
22+
const { width, height } = imgEl1
23+
24+
const result = await faceLandmark68TinyNet.detectLandmarks(imgEl1) as FaceLandmarks68
25+
expect(result.imageWidth).toEqual(width)
26+
expect(result.imageHeight).toEqual(height)
27+
expect(result.shift.x).toEqual(0)
28+
expect(result.shift.y).toEqual(0)
29+
result.positions.forEach((pt, i) => {
30+
const { x, y } = faceLandmarkPositions1[i]
31+
expectPointClose(pt, { x, y }, 5)
32+
})
33+
})
34+
35+
it('computes face landmarks for rectangular input', async () => {
36+
const { width, height } = imgElRect
37+
38+
const result = await faceLandmark68TinyNet.detectLandmarks(imgElRect) as FaceLandmarks68
39+
expect(result.imageWidth).toEqual(width)
40+
expect(result.imageHeight).toEqual(height)
41+
expect(result.shift.x).toEqual(0)
42+
expect(result.shift.y).toEqual(0)
43+
result.positions.forEach((pt, i) => {
44+
const { x, y } = faceLandmarkPositionsRect[i]
45+
expectPointClose(pt, { x, y }, 5)
46+
})
47+
})
48+
49+
})
50+
51+
})
52+

test/tests/faceRecognitionNet/faceRecognitionNet.test.ts

Lines changed: 3 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,7 @@ describe('faceRecognitionNet', () => {
2222
faceDescriptor2 = await fetchJson<number[]>('base/test/data/faceDescriptor2.json')
2323
faceDescriptorRect = await fetchJson<number[]>('base/test/data/faceDescriptorRect.json')
2424
})
25-
26-
describeWithNets('uncompressed weights', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
25+
describeWithNets('quantized weights', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
2726

2827
it('computes face descriptor for squared input', async () => {
2928
const result = await faceRecognitionNet.computeFaceDescriptor(imgEl1) as Float32Array
@@ -39,26 +38,8 @@ describe('faceRecognitionNet', () => {
3938

4039
})
4140

42-
// TODO: figure out why descriptors return NaN in the test cases
43-
/*
44-
describeWithNets('quantized weights', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
45-
46-
it('computes face descriptor for squared input', async () => {
47-
const result = await faceRecognitionNet.computeFaceDescriptor(imgEl1) as Float32Array
48-
expect(result.length).toEqual(128)
49-
expect(result).toEqual(new Float32Array(faceDescriptor1))
50-
})
51-
52-
it('computes face descriptor for rectangular input', async () => {
53-
const result = await faceRecognitionNet.computeFaceDescriptor(imgElRect) as Float32Array
54-
expect(result.length).toEqual(128)
55-
expect(result).toEqual(new Float32Array(faceDescriptorRect))
56-
})
57-
58-
})
59-
*/
6041

61-
describeWithNets('batch inputs', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
42+
describeWithNets('batch inputs', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
6243

6344
it('computes face descriptors for batch of image elements', async () => {
6445
const inputs = [imgEl1, imgEl2, imgElRect]
@@ -113,7 +94,7 @@ describe('faceRecognitionNet', () => {
11394

11495
})
11596

116-
describeWithNets('no memory leaks', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
97+
describeWithNets('no memory leaks', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
11798

11899
describe('NeuralNetwork, uncompressed model', () => {
119100

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
import { fetchImage, fetchJson } from '../../../src';
2+
import { euclideanDistance } from '../../../src/euclideanDistance';
3+
import { describeWithNets } from '../../utils';
4+
5+
// TODO: figure out why quantized weights results in NaNs in testcases
6+
// apparently (net weight values differ when loading with karma)
7+
xdescribe('faceRecognitionNet, uncompressed', () => {
8+
9+
let imgEl1: HTMLImageElement
10+
let imgElRect: HTMLImageElement
11+
let faceDescriptor1: number[]
12+
let faceDescriptorRect: number[]
13+
14+
beforeAll(async () => {
15+
imgEl1 = await fetchImage('base/test/images/face1.png')
16+
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
17+
faceDescriptor1 = await fetchJson<number[]>('base/test/data/faceDescriptor1.json')
18+
faceDescriptorRect = await fetchJson<number[]>('base/test/data/faceDescriptorRect.json')
19+
})
20+
21+
describeWithNets('uncompressed weights', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
22+
23+
it('computes face descriptor for squared input', async () => {
24+
const result = await faceRecognitionNet.computeFaceDescriptor(imgEl1) as Float32Array
25+
expect(result.length).toEqual(128)
26+
expect(euclideanDistance(result, faceDescriptor1)).toBeLessThan(0.1)
27+
})
28+
29+
it('computes face descriptor for rectangular input', async () => {
30+
const result = await faceRecognitionNet.computeFaceDescriptor(imgElRect) as Float32Array
31+
expect(result.length).toEqual(128)
32+
expect(euclideanDistance(result, faceDescriptorRect)).toBeLessThan(0.1)
33+
})
34+
35+
})
36+
})

test/tests/mtcnn/expectMtcnnResults.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,13 +16,13 @@ export const expectedMtcnnBoxes: IRect[] = sortBoxes([
1616
export function expectMtcnnResults(
1717
results: FaceDetectionWithLandmarks<FaceLandmarks5>[],
1818
expectedMtcnnFaceLandmarks: IPoint[][],
19+
expectedScores: number[],
1920
deltas: BoxAndLandmarksDeltas
2021
) {
2122

2223
const expectedMtcnnFaceLandmarksSorted = sortByDistanceToOrigin(expectedMtcnnFaceLandmarks, obj => obj[0])
2324
const expectedResults = expectedMtcnnBoxes
2425
.map((detection, i) => ({ detection, landmarks: expectedMtcnnFaceLandmarksSorted[i] }))
25-
const expectedScores = results.map(_ => 1.0)
2626

2727
return expectFaceDetectionsWithLandmarks<FaceLandmarks5>(results, expectedResults, expectedScores, deltas)
2828
}

0 commit comments

Comments
 (0)