@@ -65,7 +65,7 @@
 * double this size, and so on. Larger segments may be created if necessary
 * to satisfy large requests.
 */
-#define DSA_INITIAL_SEGMENT_SIZE ((Size) (1 * 1024 * 1024))
+#define DSA_INITIAL_SEGMENT_SIZE ((size_t) (1 * 1024 * 1024))

/*
 * How many segments to create before we double the segment size. If this is
@@ -98,7 +98,7 @@
#define DSA_OFFSET_BITMASK (((dsa_pointer) 1 << DSA_OFFSET_WIDTH) - 1)

/* The maximum size of a DSM segment. */
-#define DSA_MAX_SEGMENT_SIZE ((Size) 1 << DSA_OFFSET_WIDTH)
+#define DSA_MAX_SEGMENT_SIZE ((size_t) 1 << DSA_OFFSET_WIDTH)

/* Number of pages (see FPM_PAGE_SIZE) per regular superblock. */
#define DSA_PAGES_PER_SUPERBLOCK 16
@@ -121,7 +121,7 @@
#define DSA_EXTRACT_OFFSET(dp) ((dp) & DSA_OFFSET_BITMASK)

/* The type used for index segment indexes (zero based). */
-typedef Size dsa_segment_index;
+typedef size_t dsa_segment_index;

/* Sentinel value for dsa_segment_index indicating 'none' or 'end'. */
#define DSA_SEGMENT_INDEX_NONE (~(dsa_segment_index)0)
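
For context (not part of the patch): a dsa_pointer packs a segment index into its high bits and a byte offset within that segment into its low DSA_OFFSET_WIDTH bits, which is why DSA_MAX_SEGMENT_SIZE is 1 << DSA_OFFSET_WIDTH. The sketch below shows that decomposition in isolation; the typedef, the 40-bit width, and the helper name are illustrative assumptions rather than code copied from dsa.c.

#include <stdint.h>
#include <stddef.h>

typedef uint64_t example_dsa_pointer;    /* assuming the 64-bit dsa_pointer configuration */

#define EXAMPLE_OFFSET_WIDTH   40        /* assumed value of DSA_OFFSET_WIDTH */
#define EXAMPLE_OFFSET_BITMASK (((example_dsa_pointer) 1 << EXAMPLE_OFFSET_WIDTH) - 1)

/* Split a packed pointer into a segment index (high bits) and a byte offset
 * within that segment (low bits), the decomposition dsa_get_address()
 * performs before adding the offset to the segment's local mapping. */
static void
example_decode(example_dsa_pointer dp, size_t *segment_index, size_t *offset)
{
    *segment_index = (size_t) (dp >> EXAMPLE_OFFSET_WIDTH);
    *offset = (size_t) (dp & EXAMPLE_OFFSET_BITMASK);
}
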
@@ -153,9 +153,9 @@ typedef struct
    /* Sanity check magic value. */
    uint32      magic;
    /* Total number of pages in this segment (excluding metadata area). */
-   Size        usable_pages;
+   size_t      usable_pages;
    /* Total size of this segment in bytes. */
-   Size        size;
+   size_t      size;

    /*
     * Index of the segment that precedes this one in the same segment bin, or
@@ -169,7 +169,7 @@ typedef struct
     */
    dsa_segment_index next;
    /* The index of the bin that contains this segment. */
-   Size        bin;
+   size_t      bin;

    /*
     * A flag raised to indicate that this segment is being returned to the
@@ -197,7 +197,7 @@ typedef struct
    dsa_pointer prevspan;       /* Previous span. */
    dsa_pointer nextspan;       /* Next span. */
    dsa_pointer start;          /* Starting address. */
-   Size        npages;         /* Length of span in pages. */
+   size_t      npages;         /* Length of span in pages. */
    uint16      size_class;     /* Size class. */
    uint16      ninitialized;   /* Maximum number of objects ever allocated. */
    uint16      nallocatable;   /* Number of objects currently allocatable. */
@@ -308,17 +308,17 @@ typedef struct
    /* The object pools for each size class. */
    dsa_area_pool pools[DSA_NUM_SIZE_CLASSES];
    /* The total size of all active segments. */
-   Size        total_segment_size;
+   size_t      total_segment_size;
    /* The maximum total size of backing storage we are allowed. */
-   Size        max_total_segment_size;
+   size_t      max_total_segment_size;
    /* Highest used segment index in the history of this area. */
    dsa_segment_index high_segment_index;
    /* The reference count for this area. */
    int         refcnt;
    /* A flag indicating that this area has been pinned. */
    bool        pinned;
    /* The number of times that segments have been freed. */
-   Size        freed_segment_counter;
+   size_t      freed_segment_counter;
    /* The LWLock tranche ID. */
    int         lwlock_tranche_id;
    /* The general lock (protects everything except object pools). */
@@ -371,7 +371,7 @@ struct dsa_area
    dsa_segment_index high_segment_index;

    /* The last observed freed_segment_counter. */
-   Size        freed_segment_counter;
+   size_t      freed_segment_counter;
};

#define DSA_SPAN_NOTHING_FREE ((uint16) -1)
@@ -382,7 +382,7 @@ struct dsa_area
    (segment_map_ptr - &area->segment_maps[0])

static void init_span(dsa_area *area, dsa_pointer span_pointer,
-         dsa_area_pool *pool, dsa_pointer start, Size npages,
+         dsa_area_pool *pool, dsa_pointer start, size_t npages,
          uint16 size_class);
static bool transfer_first_span(dsa_area *area, dsa_area_pool *pool,
                    int fromclass, int toclass);
@@ -396,8 +396,8 @@ static void unlink_span(dsa_area *area, dsa_area_span *span);
static void add_span_to_fullness_class(dsa_area *area, dsa_area_span *span,
                           dsa_pointer span_pointer, int fclass);
static void unlink_segment(dsa_area *area, dsa_segment_map *segment_map);
-static dsa_segment_map *get_best_segment(dsa_area *area, Size npages);
-static dsa_segment_map *make_new_segment(dsa_area *area, Size requested_pages);
+static dsa_segment_map *get_best_segment(dsa_area *area, size_t npages);
+static dsa_segment_map *make_new_segment(dsa_area *area, size_t requested_pages);
static dsa_area *create_internal(void *place, size_t size,
                     int tranche_id,
                     dsm_handle control_handle,
@@ -662,7 +662,7 @@ dsa_pin_mapping(dsa_area *area)
 * flags.
 */
dsa_pointer
-dsa_allocate_extended(dsa_area *area, Size size, int flags)
+dsa_allocate_extended(dsa_area *area, size_t size, int flags)
{
    uint16      size_class;
    dsa_pointer start_pointer;
@@ -685,8 +685,8 @@ dsa_allocate_extended(dsa_area *area, Size size, int flags)
     */
    if (size > dsa_size_classes[lengthof(dsa_size_classes) - 1])
    {
-       Size        npages = fpm_size_to_pages(size);
-       Size        first_page;
+       size_t      npages = fpm_size_to_pages(size);
+       size_t      first_page;
        dsa_pointer span_pointer;
        dsa_area_pool *pool = &area->control->pools[DSA_SCLASS_SPAN_LARGE];

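For context (not part of the patch): requests larger than the biggest size class are backed by whole free-page-manager pages, so fpm_size_to_pages() rounds the byte count up to page granularity before the span is created. A minimal sketch of that rounding follows, with the 4096-byte page size assumed for illustration rather than taken from freepage.h.

#include <stddef.h>

#define EXAMPLE_FPM_PAGE_SIZE 4096      /* assumed page size; freepage.h defines the real FPM_PAGE_SIZE */

/* Round a requested byte count up to whole pages, mirroring what the
 * large-object path above obtains from fpm_size_to_pages(size). */
static size_t
example_size_to_pages(size_t size)
{
    return (size + EXAMPLE_FPM_PAGE_SIZE - 1) / EXAMPLE_FPM_PAGE_SIZE;
}
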
@@ -818,7 +818,7 @@ dsa_free(dsa_area *area, dsa_pointer dp)
    dsa_area_span *span;
    char       *superblock;
    char       *object;
-   Size        size;
+   size_t      size;
    int         size_class;

    /* Make sure we don't have a stale segment in the slot 'dp' refers to. */
@@ -925,7 +925,7 @@ void *
dsa_get_address(dsa_area *area, dsa_pointer dp)
{
    dsa_segment_index index;
-   Size        offset;
+   size_t      offset;

    /* Convert InvalidDsaPointer to NULL. */
    if (!DsaPointerIsValid(dp))
@@ -998,7 +998,7 @@ dsa_unpin(dsa_area *area)
 * backends that have attached to them.
 */
void
-dsa_set_size_limit(dsa_area *area, Size limit)
+dsa_set_size_limit(dsa_area *area, size_t limit)
{
    LWLockAcquire(DSA_AREA_LOCK(area), LW_EXCLUSIVE);
    area->control->max_total_segment_size = limit;
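
For context (not part of the patch): a short usage sketch of the function whose signature changes here. The 64MB cap and the wrapper name are arbitrary example choices.

#include "utils/dsa.h"

/* Cap the total backing storage of an attached area at 64MB (example value).
 * Segment creation beyond this limit will fail rather than grow the area. */
static void
example_apply_limit(dsa_area *area)
{
    dsa_set_size_limit(area, (size_t) 64 * 1024 * 1024);
}
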
@@ -1057,7 +1057,7 @@ dsa_trim(dsa_area *area)
void
dsa_dump(dsa_area *area)
{
-   Size        i,
+   size_t      i,
                j;

    /*
@@ -1158,10 +1158,10 @@ dsa_dump(dsa_area *area)
 * Return the smallest size that you can successfully provide to
 * dsa_create_in_place.
 */
-Size
+size_t
dsa_minimum_size(void)
{
-   Size        size;
+   size_t      size;
    int         pages = 0;

    size = MAXALIGN(sizeof(dsa_area_control)) +
@@ -1189,9 +1189,9 @@ create_internal(void *place, size_t size,
    dsa_area_control *control;
    dsa_area   *area;
    dsa_segment_map *segment_map;
-   Size        usable_pages;
-   Size        total_pages;
-   Size        metadata_bytes;
+   size_t      usable_pages;
+   size_t      total_pages;
+   size_t      metadata_bytes;
    int         i;

    /* Sanity check on the space we have to work in. */
@@ -1224,7 +1224,7 @@ create_internal(void *place, size_t size,
    control->segment_header.freed = false;
    control->segment_header.size = DSA_INITIAL_SEGMENT_SIZE;
    control->handle = control_handle;
-   control->max_total_segment_size = (Size) -1;
+   control->max_total_segment_size = (size_t) -1;
    control->total_segment_size = size;
    memset(&control->segment_handles[0], 0,
           sizeof(dsm_handle) * DSA_MAX_SEGMENTS);
@@ -1337,11 +1337,11 @@ attach_internal(void *place, dsm_segment *segment, dsa_handle handle)
static void
init_span(dsa_area *area,
          dsa_pointer span_pointer,
-         dsa_area_pool *pool, dsa_pointer start, Size npages,
+         dsa_area_pool *pool, dsa_pointer start, size_t npages,
          uint16 size_class)
{
    dsa_area_span *span = dsa_get_address(area, span_pointer);
-   Size        obsize = dsa_size_classes[size_class];
+   size_t      obsize = dsa_size_classes[size_class];

    /*
     * The per-pool lock must be held because we manipulate the span list for
@@ -1437,7 +1437,7 @@ alloc_object(dsa_area *area, int size_class)
    dsa_pointer block;
    dsa_pointer result;
    char       *object;
-   Size        size;
+   size_t      size;

    /*
     * Even though ensure_active_superblock can in turn call alloc_object if
@@ -1523,12 +1523,12 @@ ensure_active_superblock(dsa_area *area, dsa_area_pool *pool,
{
    dsa_pointer span_pointer;
    dsa_pointer start_pointer;
-   Size        obsize = dsa_size_classes[size_class];
-   Size        nmax;
+   size_t      obsize = dsa_size_classes[size_class];
+   size_t      nmax;
    int         fclass;
-   Size        npages = 1;
-   Size        first_page;
-   Size        i;
+   size_t      npages = 1;
+   size_t      first_page;
+   size_t      i;
    dsa_segment_map *segment_map;

    Assert(LWLockHeldByMe(DSA_SCLASS_LOCK(area, size_class)));
@@ -1959,9 +1959,9 @@ unlink_segment(dsa_area *area, dsa_segment_map *segment_map)
 * pages map.
 */
static dsa_segment_map *
-get_best_segment(dsa_area *area, Size npages)
+get_best_segment(dsa_area *area, size_t npages)
{
-   Size        bin;
+   size_t      bin;

    Assert(LWLockHeldByMe(DSA_AREA_LOCK(area)));
    check_for_freed_segments_locked(area);
@@ -1978,7 +1978,7 @@ get_best_segment(dsa_area *area, Size npages)
         * The minimum contiguous size that any segment in this bin should
         * have. We'll re-bin if we see segments with fewer.
         */
-       Size        threshold = (Size) 1 << (bin - 1);
+       size_t      threshold = (size_t) 1 << (bin - 1);
        dsa_segment_index segment_index;

        /* Search this bin for a segment with enough contiguous space. */
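
For context (not part of the patch): segments are filed into bins keyed by how many contiguous free pages they hold, so any segment in a given bin is expected to offer at least 1 << (bin - 1) contiguous pages, which is the threshold computed above. The sketch below reproduces that binning rule; the function name is illustrative and the clamp to the number of bins that dsa.c applies is omitted.

#include <stddef.h>

/* Map a count of contiguous free pages to a segment bin: bin 0 means no
 * free pages, otherwise the bin is floor(log2(n)) + 1, so members of bin b
 * hold at least ((size_t) 1 << (b - 1)) contiguous pages. */
static size_t
example_pages_to_bin(size_t n)
{
    size_t      bin = 0;

    while (n != 0)
    {
        ++bin;
        n >>= 1;
    }
    return bin;
}
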
@@ -1987,7 +1987,7 @@ get_best_segment(dsa_area *area, Size npages)
        {
            dsa_segment_map *segment_map;
            dsa_segment_index next_segment_index;
-           Size        contiguous_pages;
+           size_t      contiguous_pages;

            segment_map = get_segment_by_index(area, segment_index);
            next_segment_index = segment_map->header->next;
@@ -2003,7 +2003,7 @@ get_best_segment(dsa_area *area, Size npages)
            /* Re-bin it if it's no longer in the appropriate bin. */
            if (contiguous_pages < threshold)
            {
-               Size        new_bin;
+               size_t      new_bin;

                new_bin = contiguous_pages_to_segment_bin(contiguous_pages);

@@ -2051,13 +2051,13 @@ get_best_segment(dsa_area *area, Size npages)
 * segments would be exceeded.
 */
static dsa_segment_map *
-make_new_segment(dsa_area *area, Size requested_pages)
+make_new_segment(dsa_area *area, size_t requested_pages)
{
    dsa_segment_index new_index;
-   Size        metadata_bytes;
-   Size        total_size;
-   Size        total_pages;
-   Size        usable_pages;
+   size_t      metadata_bytes;
+   size_t      total_size;
+   size_t      total_pages;
+   size_t      usable_pages;
    dsa_segment_map *segment_map;
    dsm_segment *segment;

@@ -2095,7 +2095,7 @@ make_new_segment(dsa_area *area, Size requested_pages)
     * pages we can fit.
     */
    total_size = DSA_INITIAL_SEGMENT_SIZE *
-       ((Size) 1 << (new_index / DSA_NUM_SEGMENTS_AT_EACH_SIZE));
+       ((size_t) 1 << (new_index / DSA_NUM_SEGMENTS_AT_EACH_SIZE));
    total_size = Min(total_size, DSA_MAX_SEGMENT_SIZE);
    total_size = Min(total_size,
                     area->control->max_total_segment_size -
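
For context (not part of the patch): this is the geometric sizing rule described near the top of the file. Each new segment's size is DSA_INITIAL_SEGMENT_SIZE shifted left once per DSA_NUM_SEGMENTS_AT_EACH_SIZE segments created, then clamped to DSA_MAX_SEGMENT_SIZE and to the remaining allowance under max_total_segment_size. A standalone sketch of the uncapped part of that computation, with the constants passed in as parameters rather than taken from dsa.c:

#include <stddef.h>

/* Size of the segment with the given index before the total-size limit is
 * applied, mirroring the total_size computation above.  All inputs are
 * caller-supplied here; in dsa.c they are the DSA_* constants. */
static size_t
example_segment_size(size_t new_index,
                     size_t initial_segment_size,
                     size_t num_segments_at_each_size,
                     size_t max_segment_size)
{
    size_t      total_size;

    total_size = initial_segment_size *
        ((size_t) 1 << (new_index / num_segments_at_each_size));
    if (total_size > max_segment_size)
        total_size = max_segment_size;
    return total_size;
}
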
@@ -2222,7 +2222,7 @@ make_new_segment(dsa_area *area, Size requested_pages)
static void
check_for_freed_segments(dsa_area *area)
{
-   Size        freed_segment_counter;
+   size_t      freed_segment_counter;

    /*
     * Any other process that has freed a segment has incremented
@@ -2258,7 +2258,7 @@ check_for_freed_segments(dsa_area *area)
static void
check_for_freed_segments_locked(dsa_area *area)
{
-   Size        freed_segment_counter;
+   size_t      freed_segment_counter;
    int         i;

    Assert(LWLockHeldByMe(DSA_AREA_LOCK(area)));