
Commit 9a40720

Allow repalloc() to give back space when a large chunk is downsized.
Up to now, if you resized a large (>8K) palloc chunk down to a smaller size, aset.c made no attempt to return any space to the malloc pool. That's unpleasant if a really large allocation is resized to a significantly smaller size. I think no such cases existed when this code was designed, and I'm not sure whether they're common even yet, but an upcoming fix to encoding conversion will certainly create such cases. Therefore, fix AllocSetRealloc so that it gives realloc() a chance to do something with the block. This doesn't noticeably increase complexity; we mostly just have to change the order in which the cases are considered.

Back-patch to all supported branches.

Discussion: https://postgr.es/m/20190816181418.GA898@alvherre.pgsql
Discussion: https://postgr.es/m/3614.1569359690@sss.pgh.pa.us
1 parent 0b11dc0 commit 9a40720
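
For context, here is a minimal sketch (not part of the commit) of the backend coding pattern the fix targets: a palloc'd chunk well above the single-chunk-block threshold (~8K) is shrunk drastically with repalloc(). Before this change aset.c kept the whole original block; with it, AllocSetRealloc gives realloc() a chance to hand the excess back to the malloc pool. The function name and sizes below are illustrative only.

#include "postgres.h"

static void
shrink_buffer_example(void)
{
	/* allocate well above the single-chunk-block threshold (~8K) */
	char	   *buf = palloc(4 * 1024 * 1024);

	/* ... fill and use buf ... */

	/*
	 * Shrink it to a fraction of its size.  With this commit, the containing
	 * block is passed to realloc(), so the unused space can be returned to
	 * the malloc pool instead of staying attached to the chunk.
	 */
	buf = repalloc(buf, 1024);

	pfree(buf);
}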

File tree: 1 file changed (+77 / -65 lines)

src/backend/utils/mmgr/aset.c

Lines changed: 77 additions & 65 deletions
@@ -1084,62 +1084,12 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
 				 set->header.name, chunk);
 #endif
 
-	/*
-	 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
-	 * allocated area already is >= the new size.  (In particular, we always
-	 * fall out here if the requested size is a decrease.)
-	 */
-	if (oldsize >= size)
-	{
-#ifdef MEMORY_CONTEXT_CHECKING
-		Size		oldrequest = chunk->requested_size;
-
-#ifdef RANDOMIZE_ALLOCATED_MEMORY
-		/* We can only fill the extra space if we know the prior request */
-		if (size > oldrequest)
-			randomize_mem((char *) pointer + oldrequest,
-						  size - oldrequest);
-#endif
-
-		chunk->requested_size = size;
-
-		/*
-		 * If this is an increase, mark any newly-available part UNDEFINED.
-		 * Otherwise, mark the obsolete part NOACCESS.
-		 */
-		if (size > oldrequest)
-			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
-										size - oldrequest);
-		else
-			VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
-									   oldsize - size);
-
-		/* set mark to catch clobber of "unused" space */
-		if (size < oldsize)
-			set_sentinel(pointer, size);
-#else							/* !MEMORY_CONTEXT_CHECKING */
-
-		/*
-		 * We don't have the information to determine whether we're growing
-		 * the old request or shrinking it, so we conservatively mark the
-		 * entire new allocation DEFINED.
-		 */
-		VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
-		VALGRIND_MAKE_MEM_DEFINED(pointer, size);
-#endif
-
-		/* Disallow external access to private part of chunk header. */
-		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
-
-		return pointer;
-	}
-
 	if (oldsize > set->allocChunkLimit)
 	{
 		/*
 		 * The chunk must have been allocated as a single-chunk block.  Use
-		 * realloc() to make the containing block bigger with minimum space
-		 * wastage.
+		 * realloc() to make the containing block bigger, or smaller, with
+		 * minimum space wastage.
 		 */
 		AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
 		Size		chksize;
@@ -1153,11 +1103,19 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
 		if (block->aset != set ||
 			block->freeptr != block->endptr ||
 			block->freeptr != ((char *) block) +
-			(chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
+			(oldsize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
 			elog(ERROR, "could not find block containing chunk %p", chunk);
 
+		/*
+		 * Even if the new request is less than set->allocChunkLimit, we stick
+		 * with the single-chunk block approach.  Therefore we need
+		 * chunk->size to be bigger than set->allocChunkLimit, so we don't get
+		 * confused about the chunk's status in future calls.
+		 */
+		chksize = Max(size, set->allocChunkLimit + 1);
+		chksize = MAXALIGN(chksize);
+
 		/* Do the realloc */
-		chksize = MAXALIGN(size);
 		blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
 		block = (AllocBlock) realloc(block, blksize);
 		if (block == NULL)
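
The key change in this hunk is that the realloc'd block's chunk size is clamped to stay above allocChunkLimit, so a later repalloc() or pfree() still treats the chunk as a single-chunk block even when the new request is small. The standalone snippet below only illustrates that arithmetic; ALLOC_CHUNK_LIMIT and the 8-byte alignment are assumptions standing in for the per-context limit and the platform MAXALIGN.

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for set->allocChunkLimit and the real Max/MAXALIGN macros. */
#define ALLOC_CHUNK_LIMIT	8192
#define Max(x, y)			((x) > (y) ? (x) : (y))
#define MAXALIGN(LEN)		(((size_t) (LEN) + 7) & ~((size_t) 7))

int
main(void)
{
	size_t		size = 1000;	/* new, much smaller request */

	/* keep the chunk size above allocChunkLimit so the chunk still looks
	 * like a single-chunk block in future calls */
	size_t		chksize = Max(size, ALLOC_CHUNK_LIMIT + 1);

	chksize = MAXALIGN(chksize);
	printf("chksize = %zu\n", chksize);		/* 8200 with these assumptions */
	return 0;
}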
@@ -1182,17 +1140,21 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
 #ifdef MEMORY_CONTEXT_CHECKING
 #ifdef RANDOMIZE_ALLOCATED_MEMORY
 		/* We can only fill the extra space if we know the prior request */
-		randomize_mem((char *) pointer + chunk->requested_size,
-					  size - chunk->requested_size);
+		if (size > chunk->requested_size)
+			randomize_mem((char *) pointer + chunk->requested_size,
+						  size - chunk->requested_size);
 #endif
 
 		/*
-		 * realloc() (or randomize_mem()) will have left the newly-allocated
+		 * realloc() (or randomize_mem()) will have left any newly-allocated
 		 * part UNDEFINED, but we may need to adjust trailing bytes from the
 		 * old allocation.
 		 */
-		VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
-									oldsize - chunk->requested_size);
+#ifdef USE_VALGRIND
+		if (oldsize > chunk->requested_size)
+			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
+										oldsize - chunk->requested_size);
+#endif
 
 		chunk->requested_size = size;
 
@@ -1217,15 +1179,65 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
 
 		return pointer;
 	}
+
+	/*
+	 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
+	 * allocated area already is >= the new size.  (In particular, we will
+	 * fall out here if the requested size is a decrease.)
+	 */
+	else if (oldsize >= size)
+	{
+#ifdef MEMORY_CONTEXT_CHECKING
+		Size		oldrequest = chunk->requested_size;
+
+#ifdef RANDOMIZE_ALLOCATED_MEMORY
+		/* We can only fill the extra space if we know the prior request */
+		if (size > oldrequest)
+			randomize_mem((char *) pointer + oldrequest,
+						  size - oldrequest);
+#endif
+
+		chunk->requested_size = size;
+
+		/*
+		 * If this is an increase, mark any newly-available part UNDEFINED.
+		 * Otherwise, mark the obsolete part NOACCESS.
+		 */
+		if (size > oldrequest)
+			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
+										size - oldrequest);
+		else
+			VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
+									   oldsize - size);
+
+		/* set mark to catch clobber of "unused" space */
+		if (size < oldsize)
+			set_sentinel(pointer, size);
+#else							/* !MEMORY_CONTEXT_CHECKING */
+
+		/*
+		 * We don't have the information to determine whether we're growing
+		 * the old request or shrinking it, so we conservatively mark the
+		 * entire new allocation DEFINED.
+		 */
+		VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
+		VALGRIND_MAKE_MEM_DEFINED(pointer, size);
+#endif
+
+		/* Disallow external access to private part of chunk header. */
+		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
+
+		return pointer;
+	}
 	else
 	{
 		/*
-		 * Small-chunk case.  We just do this by brute force, ie, allocate a
-		 * new chunk and copy the data.  Since we know the existing data isn't
-		 * huge, this won't involve any great memcpy expense, so it's not
-		 * worth being smarter.  (At one time we tried to avoid memcpy when it
-		 * was possible to enlarge the chunk in-place, but that turns out to
-		 * misbehave unpleasantly for repeated cycles of
+		 * Enlarge-a-small-chunk case.  We just do this by brute force, ie,
+		 * allocate a new chunk and copy the data.  Since we know the existing
+		 * data isn't huge, this won't involve any great memcpy expense, so
+		 * it's not worth being smarter.  (At one time we tried to avoid
+		 * memcpy when it was possible to enlarge the chunk in-place, but that
+		 * turns out to misbehave unpleasantly for repeated cycles of
 		 * palloc/repalloc/pfree: the eventually freed chunks go into the
 		 * wrong freelist for the next initial palloc request, and so we leak
 		 * memory indefinitely.  See pgsql-hackers archives for 2007-08-11.)
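
Taken together, the hunks reorder AllocSetRealloc's three cases: the single-chunk-block path (which can now both grow and shrink via realloc()) is checked first, the "already big enough" in-place path second, and the enlarge-a-small-chunk path last. The toy program below only illustrates that decision order; ALLOC_CHUNK_LIMIT is an assumed stand-in for set->allocChunkLimit, and oldsize here means the chunk's allocated size, not the caller's original request.

#include <stdio.h>
#include <stddef.h>

#define ALLOC_CHUNK_LIMIT 8192	/* assumed stand-in for set->allocChunkLimit */

/* Which of the three AllocSetRealloc paths would handle a given resize? */
static const char *
realloc_path(size_t oldsize, size_t newsize)
{
	if (oldsize > ALLOC_CHUNK_LIMIT)
		return "single-chunk block: realloc() the block (grow or shrink)";
	else if (oldsize >= newsize)
		return "small chunk already big enough: adjust bookkeeping in place";
	else
		return "enlarge small chunk: allocate new chunk, memcpy, free old";
}

int
main(void)
{
	printf("%s\n", realloc_path(4 * 1024 * 1024, 1024));	/* large chunk shrunk */
	printf("%s\n", realloc_path(512, 100));					/* small chunk shrunk */
	printf("%s\n", realloc_path(512, 2048));				/* small chunk grown */
	return 0;
}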
