@@ -1,4 +1,4 @@
-/* auto-generated on Fri Aug 23 09:54:21 DST 2019. Do not edit! */
+/* auto-generated on Fri Aug 23 10:23:28 DST 2019. Do not edit! */
 #include "simdjson.h"
 
 /* used for http://dmalloc.com/ Dmalloc - Debug Malloc Library */
@@ -1451,7 +1451,7 @@ UNTARGET_REGION // westmere
 
 namespace simdjson::arm64 {
 
-static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
+really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
 
 #ifdef __ARM_FEATURE_CRYPTO // some ARM processors lack this extension
   return vmull_p64(-1ULL, quote_bits);
@@ -1460,7 +1460,7 @@ static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
 #endif
 }
 
-static really_inline void find_whitespace_and_structurals(
+really_inline void find_whitespace_and_structurals(
     simd_input<ARCHITECTURE> in, uint64_t &whitespace,
     uint64_t &structurals) {
   const uint8x16_t low_nibble_mask =
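
Note: compute_quote_mask turns the bitmap of quote characters into a mask of the bytes that lie inside quoted strings. The carryless multiply by an all-ones constant (vmull_p64 on ARM, _mm_clmulepi64_si128 on x64) is a 64-bit prefix XOR: bit i of the result is the XOR of all quote bits at or below position i. A minimal portable sketch of that prefix XOR, with a hypothetical name and no dependence on the CRYPTO/CLMUL extensions:

    #include <cstdint>

    // Illustrative scalar fallback: bit i is set when an odd number of
    // quote bits occur at positions 0..i, i.e. byte i sits inside a string.
    static uint64_t prefix_xor_sketch(uint64_t quote_bits) {
      uint64_t mask = quote_bits;
      mask ^= mask << 1;
      mask ^= mask << 2;
      mask ^= mask << 4;
      mask ^= mask << 8;
      mask ^= mask << 16;
      mask ^= mask << 32;
      return mask;
    }

The dedicated carryless-multiply instructions compute the same result in a single multiply, which is why the #ifdef above prefers them when the extension is present.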
@@ -1518,7 +1518,7 @@ static really_inline void find_whitespace_and_structurals(
 // This is just a naive implementation. It should be normally
 // disable, but can be used for research purposes to compare
 // again our optimized version.
-static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
+really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
   uint32_t *out_ptr = base_ptr + base;
   idx -= 64;
   while (bits != 0) {
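
For reference, the naive flatten_bits above just walks the set bits of the 64-bit mask and records their absolute byte positions. A self-contained sketch of that loop, completing the three lines shown in the hunk (illustrative name; __builtin_ctzll is a GCC/Clang builtin used here for brevity):

    #include <cstdint>

    // Append the position of every set bit in `bits` (which describes the
    // 64 input bytes ending at `idx`) to base_ptr, advancing `base`.
    static void flatten_bits_naive_sketch(uint32_t *base_ptr, uint32_t &base,
                                          uint32_t idx, uint64_t bits) {
      uint32_t *out_ptr = base_ptr + base;
      idx -= 64;                                   // first byte covered by this mask
      while (bits != 0) {
        *out_ptr = idx + static_cast<uint32_t>(__builtin_ctzll(bits)); // lowest set bit
        bits &= bits - 1;                          // clear that bit
        out_ptr++;
      }
      base = static_cast<uint32_t>(out_ptr - base_ptr);
    }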
@@ -1536,7 +1536,7 @@ static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint3
 // base_ptr[base] incrementing base as we go
 // will potentially store extra values beyond end of valid bits, so base_ptr
 // needs to be large enough to handle this
-static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
+really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
   // In some instances, the next branch is expensive because it is mispredicted.
   // Unfortunately, in other cases,
   // it helps tremendously.
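
The optimized flatten_bits that these comments describe trades the hard-to-predict per-bit branch for unconditional groups of stores; the surplus stores land past the last valid entry, which is why base_ptr must be over-allocated. A hedged sketch of that idea (not the library's exact code; the helper name and the compiler builtins are illustrative):

    #include <cstdint>

    // Trailing-zero count that is also defined for 0, unlike the raw builtin.
    static inline uint32_t tz64(uint64_t x) {
      return x ? static_cast<uint32_t>(__builtin_ctzll(x)) : 64;
    }

    static void flatten_bits_fast_sketch(uint32_t *base_ptr, uint32_t &base,
                                         uint32_t idx, uint64_t bits) {
      const uint32_t cnt = static_cast<uint32_t>(__builtin_popcountll(bits));
      const uint32_t next_base = base + cnt;  // where base should end up
      uint32_t *out_ptr = base_ptr + base;
      idx -= 64;
      while (bits != 0) {
        for (int i = 0; i < 8; i++) {         // one branch per 8 bits, not per bit
          out_ptr[i] = idx + tz64(bits);      // junk values past the last set bit
          bits &= bits - 1;                   // no-op once bits is 0
        }
        out_ptr += 8;
      }
      base = next_base;                       // only the first cnt slots are valid
    }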
@@ -1864,15 +1864,15 @@ int find_structural_bits<Architecture::ARM64>(const uint8_t *buf, size_t len, si
 TARGET_HASWELL
 namespace simdjson::haswell {
 
-static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
+really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
   // There should be no such thing with a processing supporting avx2
   // but not clmul.
   uint64_t quote_mask = _mm_cvtsi128_si64(_mm_clmulepi64_si128(
       _mm_set_epi64x(0ULL, quote_bits), _mm_set1_epi8(0xFFu), 0));
   return quote_mask;
 }
 
-static really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
+really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
                                            uint64_t &whitespace, uint64_t &structurals) {
 
 #ifdef SIMDJSON_NAIVE_STRUCTURAL
@@ -1938,7 +1938,7 @@ static really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTUR
 // base_ptr[base] incrementing base as we go
 // will potentially store extra values beyond end of valid bits, so base_ptr
 // needs to be large enough to handle this
-static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
+really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
   // In some instances, the next branch is expensive because it is mispredicted.
   // Unfortunately, in other cases,
   // it helps tremendously.
@@ -2269,12 +2269,12 @@ UNTARGET_REGION
 TARGET_WESTMERE
 namespace simdjson::westmere {
 
-static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
+really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
   return _mm_cvtsi128_si64(_mm_clmulepi64_si128(
       _mm_set_epi64x(0ULL, quote_bits), _mm_set1_epi8(0xFFu), 0));
 }
 
-static really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
+really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
                                            uint64_t &whitespace, uint64_t &structurals) {
 
   const __m128i structural_table =
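
The find_whitespace_and_structurals variants above classify each input byte with table lookups (vtbl/pshufb shuffles keyed on byte nibbles, using tables such as structural_table) and pack the results into two 64-bit masks. A scalar reference for what those masks mean, purely for illustration:

    #include <cstddef>
    #include <cstdint>

    // For one 64-byte block: bit i of `whitespace` marks a JSON whitespace
    // byte, bit i of `structurals` marks one of { } [ ] : , . The SIMD code
    // produces the same masks 16 or 32 bytes at a time.
    static void classify_block_sketch(const uint8_t block[64],
                                      uint64_t &whitespace, uint64_t &structurals) {
      whitespace = 0;
      structurals = 0;
      for (size_t i = 0; i < 64; i++) {
        const uint8_t c = block[i];
        if (c == ' ' || c == '\t' || c == '\n' || c == '\r') {
          whitespace |= uint64_t(1) << i;
        }
        if (c == '{' || c == '}' || c == '[' || c == ']' || c == ':' || c == ',') {
          structurals |= uint64_t(1) << i;
        }
      }
    }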
@@ -2306,7 +2306,7 @@ static really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTUR
 // This is just a naive implementation. It should be normally
 // disable, but can be used for research purposes to compare
 // again our optimized version.
-static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
+really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
   uint32_t *out_ptr = base_ptr + base;
   idx -= 64;
   while (bits != 0) {
@@ -2324,7 +2324,7 @@ static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint3
 // base_ptr[base] incrementing base as we go
 // will potentially store extra values beyond end of valid bits, so base_ptr
 // needs to be large enough to handle this
-static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
+really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
   // In some instances, the next branch is expensive because it is mispredicted.
   // Unfortunately, in other cases,
   // it helps tremendously.