// +build gofuzz

package blake2b

import (
	"encoding/binary"
)

func Fuzz(data []byte) int {
	// Make sure the data conforms to the input model
	if len(data) != 211 {
		return 0
	}
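	// Input layout (211 bytes total), as consumed below:
	//   data[0:2]     rounds, big endian
	//   data[2:66]    h, 8 little-endian uint64 words
	//   data[66:194]  m, 16 little-endian uint64 words
	//   data[194:210] t, 2 little-endian uint64 words
	//   data[210]     final-block indicator (odd = final)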
	// Parse everything and call all the implementations
	var (
		rounds = binary.BigEndian.Uint16(data[0:2])

		h [8]uint64
		m [16]uint64
		t [2]uint64
		f uint64
	)
	for i := 0; i < 8; i++ {
		offset := 2 + i*8
		h[i] = binary.LittleEndian.Uint64(data[offset : offset+8])
	}
	for i := 0; i < 16; i++ {
		offset := 66 + i*8
		m[i] = binary.LittleEndian.Uint64(data[offset : offset+8])
	}
	t[0] = binary.LittleEndian.Uint64(data[194:202])
	t[1] = binary.LittleEndian.Uint64(data[202:210])

	if data[210]%2 == 1 { // Any odd byte sets the final-block flag, so the fuzzer needn't hit exactly 0/1
		f = 0xFFFFFFFFFFFFFFFF
	}
	// Run the blake2b compression on all instruction sets and cross reference
	want := h
	fGeneric(&want, &m, t[0], t[1], f, uint64(rounds))

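	// The assembly variants below are invoked unconditionally, so this fuzzer is
	// assumed to run on an amd64 host supporting SSE4, AVX and AVX2.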
	have := h
	fSSE4(&have, &m, t[0], t[1], f, uint64(rounds))
	if have != want {
		panic("SSE4 mismatches generic algo")
	}
	have = h
	fAVX(&have, &m, t[0], t[1], f, uint64(rounds))
	if have != want {
		panic("AVX mismatches generic algo")
	}
	have = h
	fAVX2(&have, &m, t[0], t[1], f, uint64(rounds))
	if have != want {
		panic("AVX2 mismatches generic algo")
	}
	return 1
}
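
For reference, a corpus seed matching the 211-byte input model could be generated roughly as sketched below. The helper program, the chosen values and the output path are illustrative only and not part of this change; the encoding simply mirrors the parsing performed by Fuzz above (rounds big endian, everything else little endian, trailing flag byte).

package main

import (
	"encoding/binary"
	"os"
)

func main() {
	var (
		rounds uint16 = 12 // standard BLAKE2b round count
		h      [8]uint64   // chain value (zero here, just a trivial seed)
		m      [16]uint64  // message block
		t      [2]uint64   // offset counters
	)
	seed := make([]byte, 211)
	binary.BigEndian.PutUint16(seed[0:2], rounds)
	for i, v := range h {
		binary.LittleEndian.PutUint64(seed[2+i*8:], v)
	}
	for i, v := range m {
		binary.LittleEndian.PutUint64(seed[66+i*8:], v)
	}
	binary.LittleEndian.PutUint64(seed[194:202], t[0])
	binary.LittleEndian.PutUint64(seed[202:210], t[1])
	seed[210] = 1 // odd byte: mark the block as final
	if err := os.WriteFile("corpus/seed-0", seed, 0o644); err != nil {
		panic(err)
	}
}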