Skip to content

Commit da0f1ca

Browse files
committed
Remove static modifiers
1 parent 5e55921 commit da0f1ca

File tree

7 files changed

+23
-23
lines changed

7 files changed

+23
-23
lines changed

singleheader/amalgamation_demo.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
/* auto-generated on Fri Aug 23 09:54:21 DST 2019. Do not edit! */
1+
/* auto-generated on Fri Aug 23 10:23:28 DST 2019. Do not edit! */
22

33
#include <iostream>
44
#include "simdjson.h"

singleheader/simdjson.cpp

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
/* auto-generated on Fri Aug 23 09:54:21 DST 2019. Do not edit! */
1+
/* auto-generated on Fri Aug 23 10:23:28 DST 2019. Do not edit! */
22
#include "simdjson.h"
33

44
/* used for http://dmalloc.com/ Dmalloc - Debug Malloc Library */
@@ -1451,7 +1451,7 @@ UNTARGET_REGION // westmere
14511451

14521452
namespace simdjson::arm64 {
14531453

1454-
static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
1454+
really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
14551455

14561456
#ifdef __ARM_FEATURE_CRYPTO // some ARM processors lack this extension
14571457
return vmull_p64(-1ULL, quote_bits);
@@ -1460,7 +1460,7 @@ static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
14601460
#endif
14611461
}
14621462

1463-
static really_inline void find_whitespace_and_structurals(
1463+
really_inline void find_whitespace_and_structurals(
14641464
simd_input<ARCHITECTURE> in, uint64_t &whitespace,
14651465
uint64_t &structurals) {
14661466
const uint8x16_t low_nibble_mask =
@@ -1518,7 +1518,7 @@ static really_inline void find_whitespace_and_structurals(
15181518
// This is just a naive implementation. It should be normally
15191519
// disable, but can be used for research purposes to compare
15201520
// again our optimized version.
1521-
static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
1521+
really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
15221522
uint32_t *out_ptr = base_ptr + base;
15231523
idx -= 64;
15241524
while (bits != 0) {
@@ -1536,7 +1536,7 @@ static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint3
15361536
// base_ptr[base] incrementing base as we go
15371537
// will potentially store extra values beyond end of valid bits, so base_ptr
15381538
// needs to be large enough to handle this
1539-
static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
1539+
really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
15401540
// In some instances, the next branch is expensive because it is mispredicted.
15411541
// Unfortunately, in other cases,
15421542
// it helps tremendously.
@@ -1864,15 +1864,15 @@ int find_structural_bits<Architecture::ARM64>(const uint8_t *buf, size_t len, si
18641864
TARGET_HASWELL
18651865
namespace simdjson::haswell {
18661866

1867-
static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
1867+
really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
18681868
// There should be no such thing with a processing supporting avx2
18691869
// but not clmul.
18701870
uint64_t quote_mask = _mm_cvtsi128_si64(_mm_clmulepi64_si128(
18711871
_mm_set_epi64x(0ULL, quote_bits), _mm_set1_epi8(0xFFu), 0));
18721872
return quote_mask;
18731873
}
18741874

1875-
static really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
1875+
really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
18761876
uint64_t &whitespace, uint64_t &structurals) {
18771877

18781878
#ifdef SIMDJSON_NAIVE_STRUCTURAL
@@ -1938,7 +1938,7 @@ static really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTUR
19381938
// base_ptr[base] incrementing base as we go
19391939
// will potentially store extra values beyond end of valid bits, so base_ptr
19401940
// needs to be large enough to handle this
1941-
static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
1941+
really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
19421942
// In some instances, the next branch is expensive because it is mispredicted.
19431943
// Unfortunately, in other cases,
19441944
// it helps tremendously.
@@ -2269,12 +2269,12 @@ UNTARGET_REGION
22692269
TARGET_WESTMERE
22702270
namespace simdjson::westmere {
22712271

2272-
static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
2272+
really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
22732273
return _mm_cvtsi128_si64(_mm_clmulepi64_si128(
22742274
_mm_set_epi64x(0ULL, quote_bits), _mm_set1_epi8(0xFFu), 0));
22752275
}
22762276

2277-
static really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
2277+
really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
22782278
uint64_t &whitespace, uint64_t &structurals) {
22792279

22802280
const __m128i structural_table =
@@ -2306,7 +2306,7 @@ static really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTUR
23062306
// This is just a naive implementation. It should be normally
23072307
// disable, but can be used for research purposes to compare
23082308
// again our optimized version.
2309-
static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
2309+
really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
23102310
uint32_t *out_ptr = base_ptr + base;
23112311
idx -= 64;
23122312
while (bits != 0) {
@@ -2324,7 +2324,7 @@ static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint3
23242324
// base_ptr[base] incrementing base as we go
23252325
// will potentially store extra values beyond end of valid bits, so base_ptr
23262326
// needs to be large enough to handle this
2327-
static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
2327+
really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
23282328
// In some instances, the next branch is expensive because it is mispredicted.
23292329
// Unfortunately, in other cases,
23302330
// it helps tremendously.

singleheader/simdjson.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
/* auto-generated on Fri Aug 23 09:54:21 DST 2019. Do not edit! */
1+
/* auto-generated on Fri Aug 23 10:23:28 DST 2019. Do not edit! */
22
/* begin file include/simdjson/simdjson_version.h */
33
// /include/simdjson/simdjson_version.h automatically generated by release.py,
44
// do not change by hand

src/arm64/stage1_find_marks.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212

1313
namespace simdjson::arm64 {
1414

15-
static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
15+
really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
1616

1717
#ifdef __ARM_FEATURE_CRYPTO // some ARM processors lack this extension
1818
return vmull_p64(-1ULL, quote_bits);
@@ -21,7 +21,7 @@ static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
2121
#endif
2222
}
2323

24-
static really_inline void find_whitespace_and_structurals(
24+
really_inline void find_whitespace_and_structurals(
2525
simd_input<ARCHITECTURE> in, uint64_t &whitespace,
2626
uint64_t &structurals) {
2727
const uint8x16_t low_nibble_mask =

src/generic/stage1_find_marks_flatten.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
// This is just a naive implementation. It should be normally
99
// disable, but can be used for research purposes to compare
1010
// again our optimized version.
11-
static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
11+
really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
1212
uint32_t *out_ptr = base_ptr + base;
1313
idx -= 64;
1414
while (bits != 0) {
@@ -26,7 +26,7 @@ static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint3
2626
// base_ptr[base] incrementing base as we go
2727
// will potentially store extra values beyond end of valid bits, so base_ptr
2828
// needs to be large enough to handle this
29-
static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
29+
really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
3030
// In some instances, the next branch is expensive because it is mispredicted.
3131
// Unfortunately, in other cases,
3232
// it helps tremendously.

src/haswell/stage1_find_marks.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,15 +13,15 @@
1313
TARGET_HASWELL
1414
namespace simdjson::haswell {
1515

16-
static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
16+
really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
1717
// There should be no such thing with a processing supporting avx2
1818
// but not clmul.
1919
uint64_t quote_mask = _mm_cvtsi128_si64(_mm_clmulepi64_si128(
2020
_mm_set_epi64x(0ULL, quote_bits), _mm_set1_epi8(0xFFu), 0));
2121
return quote_mask;
2222
}
2323

24-
static really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
24+
really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
2525
uint64_t &whitespace, uint64_t &structurals) {
2626

2727
#ifdef SIMDJSON_NAIVE_STRUCTURAL
@@ -87,7 +87,7 @@ static really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTUR
8787
// base_ptr[base] incrementing base as we go
8888
// will potentially store extra values beyond end of valid bits, so base_ptr
8989
// needs to be large enough to handle this
90-
static really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
90+
really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
9191
// In some instances, the next branch is expensive because it is mispredicted.
9292
// Unfortunately, in other cases,
9393
// it helps tremendously.

src/westmere/stage1_find_marks.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,12 +13,12 @@
1313
TARGET_WESTMERE
1414
namespace simdjson::westmere {
1515

16-
static really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
16+
really_inline uint64_t compute_quote_mask(uint64_t quote_bits) {
1717
return _mm_cvtsi128_si64(_mm_clmulepi64_si128(
1818
_mm_set_epi64x(0ULL, quote_bits), _mm_set1_epi8(0xFFu), 0));
1919
}
2020

21-
static really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
21+
really_inline void find_whitespace_and_structurals(simd_input<ARCHITECTURE> in,
2222
uint64_t &whitespace, uint64_t &structurals) {
2323

2424
const __m128i structural_table =

0 commit comments

Comments (0)