@@ -183,7 +183,7 @@ typedef int32_t mi_ssize_t;

#define MI_SMALL_OBJ_SIZE_MAX   (MI_SMALL_PAGE_SIZE/4)    // 8KiB on 64-bit
#define MI_MEDIUM_OBJ_SIZE_MAX  (MI_MEDIUM_PAGE_SIZE/4)   // 128KiB on 64-bit
- #define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
+ #define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_LARGE_OBJ_SIZE_MAX   (MI_SEGMENT_SIZE/2)       // 32MiB on 64-bit
#define MI_LARGE_OBJ_WSIZE_MAX  (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
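Taking the 64-bit figures in these comments at face value (MI_INTPTR_SIZE == 8, so MI_MEDIUM_OBJ_SIZE_MAX == 128 KiB and MI_LARGE_OBJ_SIZE_MAX == 32 MiB), the *_WSIZE_MAX limits come out to 16Ki and 4Mi words. The standalone sketch below only checks that arithmetic; its hard-coded constants are read off the comments above, not taken from the real headers.

// Standalone sketch: verify the word-size limits implied by the comments above.
// Assumes a 64-bit target (MI_INTPTR_SIZE == 8); the byte limits are taken from
// the "// 128KiB" and "// 32MiB" comments, not from the actual mimalloc headers.
#include <assert.h>
#include <stdio.h>

#define KiB (1024UL)
#define MiB (1024UL * KiB)

int main(void) {
  const unsigned long intptr_size    = 8;           // MI_INTPTR_SIZE on 64-bit
  const unsigned long medium_obj_max = 128 * KiB;   // MI_MEDIUM_OBJ_SIZE_MAX
  const unsigned long large_obj_max  = 32 * MiB;    // MI_LARGE_OBJ_SIZE_MAX

  // The *_WSIZE_MAX macros divide the byte limit by the word size.
  assert(medium_obj_max / intptr_size == 16 * KiB); // 16Ki words
  assert(large_obj_max  / intptr_size ==  4 * MiB); // 4Mi words
  printf("medium wsize max: %lu words\n", medium_obj_max / intptr_size);
  printf("large  wsize max: %lu words\n", large_obj_max  / intptr_size);
  return 0;
}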
@@ -201,10 +201,10 @@ typedef int32_t mi_ssize_t;
#define MI_HUGE_BLOCK_SIZE   ((uint32_t)(2*MI_GiB))

// blocks up to this size are always allocated aligned
- #define MI_MAX_ALIGN_GUARANTEE  (8*MI_MAX_ALIGN_SIZE)
+ #define MI_MAX_ALIGN_GUARANTEE  (8*MI_MAX_ALIGN_SIZE)

- // Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
- #define MI_ALIGNMENT_MAX        (MI_SEGMENT_SIZE >> 1)
+ // Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
+ #define MI_ALIGNMENT_MAX        (MI_SEGMENT_SIZE >> 1)


// ------------------------------------------------------
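These two limits bracket the aligned-allocation paths: blocks up to MI_MAX_ALIGN_GUARANTEE need no special handling, while alignments above MI_ALIGNMENT_MAX (32 MiB, if MI_SEGMENT_SIZE is the 64 MiB implied by the size comments above) are served from a dedicated huge-page segment. A small usage sketch with the public mi_malloc_aligned API follows; the 64 MiB alignment is chosen only to land above that assumed threshold.

// Usage sketch for the alignment limits above: one modest aligned allocation
// and one whose alignment exceeds the (assumed) MI_ALIGNMENT_MAX, which per the
// comment above is handled by a dedicated huge-page segment.
#include <mimalloc.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  void* a = mi_malloc_aligned(1024, 64);                   // normal segment path
  void* b = mi_malloc_aligned(1024, 64UL * 1024 * 1024);   // above the assumed 32 MiB cut-off

  if (a != NULL && b != NULL) {
    printf("a=%p aligned-to-64: %d\n", a, (int)(((uintptr_t)a % 64) == 0));
    printf("b=%p aligned-to-64MiB: %d\n", b, (int)(((uintptr_t)b % (64UL * 1024 * 1024)) == 0));
  }
  mi_free(a);
  mi_free(b);
  return 0;
}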
@@ -293,7 +293,7 @@ typedef uintptr_t mi_thread_free_t;
typedef struct mi_page_s {
  // "owned" by the segment
  uint32_t  slice_count;      // slices in this page (0 if not a page)
- uint32_t  slice_offset;     // distance from the actual page data slice (0 if a page)
+ uint32_t  slice_offset;     // distance from the actual page data slice (0 if a page)
  uint8_t   is_committed:1;   // `true` if the page virtual memory is committed
  uint8_t   is_zero_init:1;   // `true` if the page was initially zero initialized
  uint8_t   tag:4;            // heap tag (mi_heap_tag_t)
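The slice_count / slice_offset pair encodes the page layout inside a segment: the first slice of a page carries the metadata (slice_count > 0, slice_offset == 0), and every later slice stores the byte distance back to that first slice. A minimal sketch of following such a back-reference, using hypothetical types rather than mimalloc's internal helpers:

// Illustrative sketch (hypothetical types, not mimalloc internals) of how a
// slice_offset back-reference works: any slice that is not the start of a page
// stores the byte distance back to the slice holding the page metadata.
#include <stdint.h>

typedef struct slice_s {
  uint32_t slice_count;   // > 0 on the first slice of a page, 0 otherwise
  uint32_t slice_offset;  // 0 on the first slice, distance back to it otherwise
} slice_t;

// Given any slice of a page, recover the slice that owns the page metadata.
static slice_t* slice_to_page_start(slice_t* slice) {
  return (slice_t*)((uint8_t*)slice - slice->slice_offset);
}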
@@ -349,17 +349,17 @@ typedef enum mi_segment_kind_e {
// A segment holds a commit mask where a bit is set if
// the corresponding MI_COMMIT_SIZE area is committed.
// The MI_COMMIT_SIZE must be a multiple of the slice
- // size. If it is equal we have the most fine grained
+ // size. If it is equal we have the most fine grained
// decommit (but setting it higher can be more efficient).
// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
// be committed in one go which can be set higher than
// MI_COMMIT_SIZE for efficiency (while the decommit mask
// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
// ------------------------------------------------------

- #define MI_MINIMAL_COMMIT_SIZE      (1*MI_SEGMENT_SLICE_SIZE)
+ #define MI_MINIMAL_COMMIT_SIZE      (1*MI_SEGMENT_SLICE_SIZE)
#define MI_COMMIT_SIZE              (MI_SEGMENT_SLICE_SIZE)              // 64KiB
- #define MI_COMMIT_MASK_BITS         (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
+ #define MI_COMMIT_MASK_BITS         (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
#define MI_COMMIT_MASK_FIELD_BITS   MI_SIZE_BITS
#define MI_COMMIT_MASK_FIELD_COUNT  (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)
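Plugging in the figures the comments supply (a 64 KiB slice, hence a 64 KiB MI_COMMIT_SIZE, and a 64 MiB segment as implied by MI_LARGE_OBJ_SIZE_MAX = MI_SEGMENT_SIZE/2 = 32 MiB earlier in the file), the mask needs 64 MiB / 64 KiB = 1024 bits, i.e. 16 fields of 64 bits on a 64-bit target. The sketch below shows a fixed-size bitmask with that geometry; the helper names are made up for illustration and are not mimalloc's commit-mask API.

// Sketch of a fixed-size commit mask with the geometry implied by the comments
// above: 64 MiB segment / 64 KiB commit granularity = 1024 bits = 16 x 64-bit
// fields. Hypothetical helpers for illustration only.
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define COMMIT_MASK_BITS        1024                // assumed: segment size / commit size
#define COMMIT_MASK_FIELD_BITS  64                  // bits per field on a 64-bit target
#define COMMIT_MASK_FIELD_COUNT (COMMIT_MASK_BITS / COMMIT_MASK_FIELD_BITS)  // 16

typedef struct commit_mask_s {
  uint64_t mask[COMMIT_MASK_FIELD_COUNT];
} commit_mask_t;

// Mark one commit-granularity area (one 64 KiB chunk) as committed.
static void commit_mask_set(commit_mask_t* cm, size_t bit) {
  cm->mask[bit / COMMIT_MASK_FIELD_BITS] |= (UINT64_C(1) << (bit % COMMIT_MASK_FIELD_BITS));
}

// Is the given commit-granularity area committed?
static bool commit_mask_is_set(const commit_mask_t* cm, size_t bit) {
  return (cm->mask[bit / COMMIT_MASK_FIELD_BITS] >> (bit % COMMIT_MASK_FIELD_BITS)) & 1;
}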
@@ -432,11 +432,11 @@ typedef struct mi_segment_s {

  // from here is zero initialized
  struct mi_segment_s* next;    // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
-
+
  size_t     abandoned;         // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
  size_t     abandoned_visits;  // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)
  size_t     used;              // count of pages in use
- uintptr_t  cookie;            // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
+ uintptr_t  cookie;            // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`

  size_t     segment_slices;       // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
  size_t     segment_info_slices;  // initial slices we are using for the segment info and possible guard pages.
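The cookie supports a cheap debug-time address check: a value derived from the segment's own address is stored at initialization and re-derived whenever a pointer claims to belong to that segment, as in `mi_ptr_cookie(segment) == segment->cookie`. A minimal sketch of such a check; the XOR-with-a-process-secret formula here is an assumption for illustration, not necessarily mimalloc's exact one.

// Illustrative address-cookie check in the spirit of
// `mi_ptr_cookie(segment) == segment->cookie`. The exact mixing formula is an
// assumption for this sketch.
#include <stdbool.h>
#include <stdint.h>

static uintptr_t process_secret;  // assumed to be filled from a random source at startup

static uintptr_t ptr_cookie(const void* p) {
  return (uintptr_t)p ^ process_secret;
}

typedef struct segment_s {
  uintptr_t cookie;  // set once when the segment is initialized
} segment_t;

static void segment_init_cookie(segment_t* s) {
  s->cookie = ptr_cookie(s);
}

// Debug-mode validity check: a stale or forged segment pointer will not
// reproduce the stored cookie.
static bool segment_is_valid(const segment_t* s) {
  return ptr_cookie(s) == s->cookie;
}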
@@ -507,7 +507,7 @@ struct mi_heap_s {
  mi_page_queue_t       pages[MI_BIN_FULL + 1];  // queue of pages for each size class (or "bin")
  _Atomic(mi_block_t*)  thread_delayed_free;
  mi_threadid_t         thread_id;               // thread this heap belongs to
- mi_arena_id_t         arena_id;                // arena id if the heap belongs to a specific arena (or 0)
+ mi_arena_id_t         arena_id;                // arena id if the heap belongs to a specific arena (or 0)
  uintptr_t             cookie;                  // random cookie to verify pointers (see `_mi_ptr_cookie`)
  uintptr_t             keys[2];                 // two random keys used to encode the `thread_delayed_free` list
  mi_random_ctx_t       random;                  // random number context used for secure allocation
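The keys pair is what makes the thread_delayed_free list tamper-resistant: link pointers are stored encoded with these per-heap random keys, so an overflow that overwrites a link cannot easily forge a usable pointer. A minimal sketch of two-key link encoding; the concrete xor-plus-rotate formula is an assumption for illustration, not mimalloc's exact encoding.

// Illustrative sketch of encoding free-list links with two per-heap random
// keys, in the spirit of `keys[2]` above. The formula is assumed for the sketch.
#include <stdint.h>

#define PTR_BITS (sizeof(uintptr_t) * 8)

static inline uintptr_t rotl(uintptr_t x, unsigned r) {   // r in [1, PTR_BITS-1]
  return (x << r) | (x >> (PTR_BITS - r));
}

static inline uintptr_t rotr(uintptr_t x, unsigned r) {   // r in [1, PTR_BITS-1]
  return (x >> r) | (x << (PTR_BITS - r));
}

// Derive a rotation amount in [1, PTR_BITS-1] from the first key.
static inline unsigned rot_of(const uintptr_t keys[2]) {
  return (unsigned)(keys[0] % (PTR_BITS - 1)) + 1;
}

// Encode a next-pointer before storing it in a block on the delayed-free list.
static inline uintptr_t ptr_encode(const void* p, const uintptr_t keys[2]) {
  return rotl((uintptr_t)p ^ keys[1], rot_of(keys)) + keys[0];
}

// Decode it again when the owning heap walks the list.
static inline void* ptr_decode(uintptr_t x, const uintptr_t keys[2]) {
  return (void*)(rotr(x - keys[0], rot_of(keys)) ^ keys[1]);
}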