
Commit 672b6f4

Fix mimalloc formatting
1 parent 951faba commit 672b6f4

File tree: 20 files changed, +239 -242 lines changed

Include/mimalloc/mimalloc.h  (+7 -7)

@@ -340,18 +340,18 @@ typedef enum mi_option_e {
   mi_option_deprecated_segment_cache,
   mi_option_deprecated_page_reset,
   mi_option_abandoned_page_purge,   // immediately purge delayed purges on thread termination
-  mi_option_deprecated_segment_reset,
-  mi_option_eager_commit_delay,
+  mi_option_deprecated_segment_reset,
+  mi_option_eager_commit_delay,
   mi_option_purge_delay,            // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all.
   mi_option_use_numa_nodes,         // 0 = use all available numa nodes, otherwise use at most N nodes.
   mi_option_limit_os_alloc,         // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
   mi_option_os_tag,                 // tag used for OS logging (macOS only for now)
   mi_option_max_errors,             // issue at most N error messages
   mi_option_max_warnings,           // issue at most N warning messages
-  mi_option_max_segment_reclaim,
+  mi_option_max_segment_reclaim,
   mi_option_destroy_on_exit,        // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe.
   mi_option_arena_reserve,          // initial memory size in KiB for arena reservation (1GiB on 64-bit)
-  mi_option_arena_purge_mult,
+  mi_option_arena_purge_mult,
   mi_option_purge_extend_delay,
   _mi_option_last,
   // legacy option names
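
These options are adjustable at runtime through mimalloc's public option API. A minimal usage sketch (an illustration only, not part of this commit; `mi_option_set` and `mi_option_get` are the public accessors declared elsewhere in this header):

#include <mimalloc.h>

int main(void) {
  // purge delayed pages immediately (0 ms delay); -1 would disable purging entirely
  mi_option_set(mi_option_purge_delay, 0);
  return (mi_option_get(mi_option_purge_delay) == 0) ? 0 : 1;
}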
@@ -521,7 +521,7 @@ template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : publi
 protected:
   std::shared_ptr<mi_heap_t> heap;
   template<class U, bool D> friend struct _mi_heap_stl_allocator_common;
-
+
   _mi_heap_stl_allocator_common() {
     mi_heap_t* hp = mi_heap_new();
     this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete));  /* calls heap_delete/destroy when the refcount drops to zero */
@@ -538,7 +538,7 @@ template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : publi
 template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
   using typename _mi_heap_stl_allocator_common<T, false>::size_type;
   mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { }  // creates fresh heap that is deleted when the destructor is called
-  mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { }  // no delete nor destroy on the passed in heap
+  mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { }  // no delete nor destroy on the passed in heap
   template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }

   mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
@@ -555,7 +555,7 @@ template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x,
 template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
   using typename _mi_heap_stl_allocator_common<T, true>::size_type;
   mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { }  // creates fresh heap that is destroyed when the destructor is called
-  mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { }  // no delete nor destroy on the passed in heap
+  mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { }  // no delete nor destroy on the passed in heap
   template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }

   mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
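
For context, these allocators plug directly into standard containers. A minimal sketch of the intended use (illustrative, assuming mimalloc is compiled with its C++ STL-allocator support enabled):

#include <mimalloc.h>
#include <vector>

int main() {
  // the allocator owns a fresh mi_heap_t, deleted when the last copy goes away
  mi_heap_stl_allocator<int> alloc;
  std::vector<int, mi_heap_stl_allocator<int>> v(alloc);
  v.push_back(42);
  return 0;
}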

Include/mimalloc/mimalloc/atomic.h  (+1 -1)

@@ -300,7 +300,7 @@ typedef _Atomic(uintptr_t) mi_atomic_once_t;

 // Returns true only on the first invocation
 static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
-  if (mi_atomic_load_relaxed(once) != 0) return false;  // quick test
+  if (mi_atomic_load_relaxed(once) != 0) return false;  // quick test
   uintptr_t expected = 0;
   return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1);  // try to set to 1
 }
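
`mi_atomic_once` is a lock-free once-flag: the relaxed load filters the common already-initialized case cheaply, and the strong acquire-release CAS guarantees exactly one caller wins the 0 -> 1 transition. A sketch of how internal code might use it (hypothetical caller, not from this commit):

static mi_atomic_once_t subsystem_once;  // zero-initialized, so the flag starts "not yet run"

static void subsystem_ensure_init(void) {
  if (mi_atomic_once(&subsystem_once)) {
    // exactly one thread ever enters this branch
  }
}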

Include/mimalloc/mimalloc/internal.h  (+1 -1)

@@ -89,7 +89,7 @@ void _mi_thread_abandon(mi_tld_t *tld);

 // os.c
 void  _mi_os_init(void);  // called from process init
-void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
+void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
 void  _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
 void  _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);
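
The signatures encode the contract: `_mi_os_alloc` fills in a `mi_memid_t` describing how the memory was obtained, and that same id must be handed back on free. A hedged sketch of the pairing (internal API; the `_mi_stats_main` stats object is an assumption about the surrounding internals):

mi_memid_t memid;
void* p = _mi_os_alloc(4096, &memid, &_mi_stats_main);
if (p != NULL) {
  _mi_os_free(p, 4096, memid, &_mi_stats_main);  // pass the memid back unchanged
}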

Include/mimalloc/mimalloc/prim.h  (+7 -7)

@@ -35,10 +35,10 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config );

 // Free OS memory
 int _mi_prim_free(void* addr, size_t size );
-
+
 // Allocate OS memory. Return NULL on error.
 // The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
-// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
+// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
 // which will later be committed explicitly using `_mi_prim_commit`.
 // `is_zero` is set to true if the memory was zero initialized (as on most OS's)
 // pre: !commit => !allow_large
@@ -82,11 +82,11 @@ mi_msecs_t _mi_prim_clock_now(void);
 typedef struct mi_process_info_s {
   mi_msecs_t  elapsed;
   mi_msecs_t  utime;
-  mi_msecs_t  stime;
-  size_t      current_rss;
-  size_t      peak_rss;
+  mi_msecs_t  stime;
+  size_t      current_rss;
+  size_t      peak_rss;
   size_t      current_commit;
-  size_t      peak_commit;
+  size_t      peak_commit;
   size_t      page_faults;
 } mi_process_info_t;

@@ -117,7 +117,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);

 //-------------------------------------------------------------------
 // Thread id: `_mi_prim_thread_id()`
-//
+//
 // Getting the thread id should be performant as it is called in the
 // fast path of `_mi_free` and we specialize for various platforms as
 // inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
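
The reserve-then-commit contract described above maps naturally onto POSIX: reserving is an inaccessible mapping, committing grants access later. A hypothetical sketch of how a primitive layer could satisfy it (the names `os_reserve`/`os_commit` are illustrative, not mimalloc's actual primitives):

#include <stddef.h>
#include <sys/mman.h>

static void* os_reserve(size_t size) {
  // reserve address space only: no access rights, nothing committed yet
  void* p = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return (p == MAP_FAILED) ? NULL : p;
}

static int os_commit(void* p, size_t size) {
  // "commit" the reserved range by granting read/write access
  return mprotect(p, size, PROT_READ | PROT_WRITE);
}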

Include/mimalloc/mimalloc/track.h  (+2 -2)

@@ -34,7 +34,7 @@ The corresponding `mi_track_free` still uses the block start pointer and origina
 The `mi_track_resize` is currently unused but could be called on reallocations within a block.
 `mi_track_init` is called at program start.

-The following macros are for tools like asan and valgrind to track whether memory is
+The following macros are for tools like asan and valgrind to track whether memory is
 defined, undefined, or not accessible at all:

 #define mi_track_mem_defined(p,size)
@@ -94,7 +94,7 @@ defined, undefined, or not accessible at all:
 // no tracking

 #define MI_TRACK_ENABLED      0
-#define MI_TRACK_HEAP_DESTROY 0
+#define MI_TRACK_HEAP_DESTROY 0
 #define MI_TRACK_TOOL         "none"

 #define mi_track_malloc_size(p,reqsize,size,zero)
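
When a tracker is enabled, these no-op macros are replaced with tool client requests. A hedged sketch of what a valgrind-backed variant looks like (mimalloc ships similar definitions under its valgrind tracking mode; the exact details may differ):

#include <memcheck.h>   // valgrind's memcheck client requests

#define mi_track_mem_defined(p,size)    VALGRIND_MAKE_MEM_DEFINED(p,size)
#define mi_track_mem_undefined(p,size)  VALGRIND_MAKE_MEM_UNDEFINED(p,size)
#define mi_track_mem_noaccess(p,size)   VALGRIND_MAKE_MEM_NOACCESS(p,size)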

Include/mimalloc/mimalloc/types.h  (+11 -11)

@@ -183,7 +183,7 @@ typedef int32_t mi_ssize_t;

 #define MI_SMALL_OBJ_SIZE_MAX   (MI_SMALL_PAGE_SIZE/4)   // 8KiB on 64-bit
 #define MI_MEDIUM_OBJ_SIZE_MAX  (MI_MEDIUM_PAGE_SIZE/4)  // 128KiB on 64-bit
-#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
+#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
 #define MI_LARGE_OBJ_SIZE_MAX   (MI_SEGMENT_SIZE/2)      // 32MiB on 64-bit
 #define MI_LARGE_OBJ_WSIZE_MAX  (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)

@@ -201,10 +201,10 @@ typedef int32_t mi_ssize_t;
 #define MI_HUGE_BLOCK_SIZE   ((uint32_t)(2*MI_GiB))

 // blocks up to this size are always allocated aligned
-#define MI_MAX_ALIGN_GUARANTEE  (8*MI_MAX_ALIGN_SIZE)
+#define MI_MAX_ALIGN_GUARANTEE  (8*MI_MAX_ALIGN_SIZE)

-// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
-#define MI_ALIGNMENT_MAX        (MI_SEGMENT_SIZE >> 1)
+// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
+#define MI_ALIGNMENT_MAX        (MI_SEGMENT_SIZE >> 1)


 // ------------------------------------------------------
@@ -293,7 +293,7 @@ typedef uintptr_t mi_thread_free_t;
 typedef struct mi_page_s {
   // "owned" by the segment
   uint32_t slice_count;       // slices in this page (0 if not a page)
-  uint32_t slice_offset;      // distance from the actual page data slice (0 if a page)
+  uint32_t slice_offset;      // distance from the actual page data slice (0 if a page)
   uint8_t  is_committed : 1;  // `true` if the page virtual memory is committed
   uint8_t  is_zero_init : 1;  // `true` if the page was initially zero initialized
   uint8_t  tag : 4;           // heap tag (mi_heap_tag_t)
@@ -349,17 +349,17 @@ typedef enum mi_segment_kind_e {
 // A segment holds a commit mask where a bit is set if
 // the corresponding MI_COMMIT_SIZE area is committed.
 // The MI_COMMIT_SIZE must be a multiple of the slice
-// size. If it is equal we have the most fine grained
+// size. If it is equal we have the most fine grained
 // decommit (but setting it higher can be more efficient).
 // The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
 // be committed in one go which can be set higher than
 // MI_COMMIT_SIZE for efficiency (while the decommit mask
 // is still tracked in fine-grained MI_COMMIT_SIZE chunks)
 // ------------------------------------------------------

-#define MI_MINIMAL_COMMIT_SIZE     (1*MI_SEGMENT_SLICE_SIZE)
+#define MI_MINIMAL_COMMIT_SIZE     (1*MI_SEGMENT_SLICE_SIZE)
 #define MI_COMMIT_SIZE             (MI_SEGMENT_SLICE_SIZE)  // 64KiB
-#define MI_COMMIT_MASK_BITS        (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
+#define MI_COMMIT_MASK_BITS        (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
 #define MI_COMMIT_MASK_FIELD_BITS  MI_SIZE_BITS
 #define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)

@@ -432,11 +432,11 @@ typedef struct mi_segment_s {

   // from here is zero initialized
   struct mi_segment_s* next;  // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
-
+
   size_t    abandoned;        // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
   size_t    abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)
   size_t    used;             // count of pages in use
-  uintptr_t cookie;           // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
+  uintptr_t cookie;           // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`

   size_t    segment_slices;      // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
   size_t    segment_info_slices; // initial slices we are using segment info and possible guard pages.
@@ -507,7 +507,7 @@ struct mi_heap_s {
   mi_page_queue_t pages[MI_BIN_FULL + 1];  // queue of pages for each size class (or "bin")
   _Atomic(mi_block_t*) thread_delayed_free;
   mi_threadid_t thread_id;  // thread this heap belongs too
-  mi_arena_id_t arena_id;   // arena id if the heap belongs to a specific arena (or 0)
+  mi_arena_id_t arena_id;   // arena id if the heap belongs to a specific arena (or 0)
   uintptr_t cookie;         // random cookie to verify pointers (see `_mi_ptr_cookie`)
   uintptr_t keys[2];        // two random keys used to encode the `thread_delayed_free` list
   mi_random_ctx_t random;   // random number context used for secure allocation
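
Putting the constants above together for a 64-bit build (where, per the comments, MI_LARGE_OBJ_SIZE_MAX is 32 MiB, so MI_SEGMENT_SIZE is 64 MiB, and MI_COMMIT_SIZE is 64 KiB):

MI_COMMIT_MASK_BITS        = MI_SEGMENT_SIZE / MI_COMMIT_SIZE = 64 MiB / 64 KiB = 1024
MI_COMMIT_MASK_FIELD_BITS  = MI_SIZE_BITS                     = 64
MI_COMMIT_MASK_FIELD_COUNT = 1024 / 64                        = 16

so the commit mask is 16 machine words, with one bit per 64 KiB chunk of the segment.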

Objects/mimalloc/alloc-aligned.c  (+3 -3)

@@ -47,7 +47,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
     oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
     p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment);  // the page block size should be large enough to align in the single huge page block
     // zero afterwards as only the area from the aligned_p may be committed!
-    if (p == NULL) return NULL;
+    if (p == NULL) return NULL;
   }
   else {
     // otherwise over-allocate
@@ -73,7 +73,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
   mi_assert_internal(mi_usable_size(aligned_p)>=size);
   mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
-
+
   // now zero the block if needed
   if (alignment > MI_ALIGNMENT_MAX) {
     // for the tracker, on huge aligned allocations only from the start of the large block is defined
@@ -85,7 +85,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*

   if (p != aligned_p) {
     mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
-  }
+  }
   return aligned_p;
 }
Objects/mimalloc/alloc.c  (+7 -7)

@@ -70,7 +70,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
     }
     else {
       _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
-    }
+    }
   }

 #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
@@ -126,7 +126,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
   if (size == 0) { size = sizeof(void*); }
   #endif
   mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
-  void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
+  void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
   mi_track_malloc(p,size,zero);
   #if MI_STAT>1
   if (p != NULL) {
@@ -359,15 +359,15 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
 // only maintain stats for smaller objects if requested
 #if (MI_STAT>0)
 static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
-  #if (MI_STAT < 2)
+  #if (MI_STAT < 2)
   MI_UNUSED(block);
   #endif
   mi_heap_t* const heap = mi_heap_get_default();
   const size_t bsize = mi_page_usable_block_size(page);
   #if (MI_STAT>1)
   const size_t usize = mi_page_usable_size_of(page, block);
   mi_heap_stat_decrease(heap, malloc, usize);
-  #endif
+  #endif
   if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
     mi_heap_stat_decrease(heap, normal, bsize);
     #if (MI_STAT > 1)
@@ -379,7 +379,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
   }
   else {
     mi_heap_stat_decrease(heap, huge, bsize);
-  }
+  }
 }
 #else
 static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
@@ -418,7 +418,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
   mi_check_padding(page, block);
   _mi_padding_shrink(page, block, sizeof(mi_block_t));  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
-
+
   // huge page segments are always abandoned and can be freed immediately
   mi_segment_t* segment = _mi_page_segment(page);
   if (segment->kind == MI_SEGMENT_HUGE) {
@@ -434,7 +434,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
     _mi_segment_huge_page_reset(segment, page, block);
 #endif
   }
-
+
 #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN  // note: when tracking, cannot use mi_usable_size with multi-threading
   if (segment->kind != MI_SEGMENT_HUGE) {  // not for huge segments as we just reset the content
     _mi_debug_fill(page, block, MI_DEBUG_FREED, mi_usable_size(block));
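
The `sizeof(mi_block_t)` passed to `_mi_padding_shrink` exists because a freed block's payload is recycled to hold the free-list link, so at least one word must stay usable. For reference, this is roughly the shape of that link as declared in types.h (the `next` field is an encoded pointer in secure builds; shown here as context, not part of this commit's diff):

typedef uintptr_t mi_encoded_t;

typedef struct mi_block_s {
  mi_encoded_t next;  // (encoded) pointer to the next block on the free list
} mi_block_t;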
