1 /*-------------------------------------------------------------------------
4 * Allocation set definitions.
6 * AllocSet is our standard implementation of the abstract MemoryContext
10 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
11 * Portions Copyright (c) 1994, Regents of the University of California
14 * src/backend/utils/mmgr/aset.c
17 * This is a new (Feb. 05, 1999) implementation of the allocation set
18 * routines. AllocSet...() does not use OrderedSet...() any more.
19 * Instead it manages allocations in a block pool by itself, combining
20 * many small allocations in a few bigger blocks. AllocSetFree() normally
21 * doesn't really free() memory. It just adds the free'd area to some
22 * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23 * at once on AllocSetReset(), which happens when the memory context gets
27 * Performance improvement from Tom Lane, 8/99: for extremely large request
28 * sizes, we do want to be able to give the memory back to free() as soon
29 * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30 * freelist entries that might never be usable. This is especially needed
31 * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32 * the previous instances of the block were guaranteed to be wasted until
33 * AllocSetReset() under the old way.
35 * Further improvement 12/00: as the code stood, request sizes in the
36 * midrange between "small" and "large" were handled very inefficiently,
37 * because any sufficiently large free chunk would be used to satisfy a
38 * request, even if it was much larger than necessary. This led to more
39 * and more wasted space in allocated chunks over time. To fix, get rid
40 * of the midrange behavior: we now handle only "small" power-of-2-size
41 * chunks as chunks. Anything "large" is passed off to malloc(). Change
42 * the number of freelists to change the small/large boundary.
44 *-------------------------------------------------------------------------
49 #include "port/pg_bitutils.h"
50 #include "utils/memdebug.h"
51 #include "utils/memutils.h"
53 /*--------------------
54 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
55 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
57 * Note that all chunks in the freelists have power-of-2 sizes. This
58 * improves recyclability: we may waste some space, but the wasted space
59 * should stay pretty constant as requests are made and released.
61 * A request too large for the last freelist is handled by allocating a
62 * dedicated block from malloc(). The block still has a block header and
63 * chunk header, but when the chunk is freed we'll return the whole block
64 * to malloc(), not put it on our freelists.
66 * CAUTION: ALLOC_MINBITS must be large enough so that
67 * 1<<ALLOC_MINBITS is at least MAXALIGN,
68 * or we may fail to align the smallest chunks adequately.
69 * 8-byte alignment is enough on all currently known machines.
71 * With the current parameters, request sizes up to 8K are treated as chunks,
72 * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
73 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
74 * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
75 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
79 #define ALLOC_MINBITS 3 /* smallest chunk size is 8 bytes */
80 #define ALLOCSET_NUM_FREELISTS 11
81 #define ALLOC_CHUNK_LIMIT (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
82 /* Size of largest chunk that we use a fixed size for */
83 #define ALLOC_CHUNK_FRACTION 4
84 /* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
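/*
 * Illustrative sketch (hypothetical helper, not part of aset.c): with the
 * parameters above, a "small" request is rounded up to one of the
 * ALLOCSET_NUM_FREELISTS power-of-2 chunk sizes (8 bytes up to 8K), while
 * anything larger than the context's allocChunkLimit is handed to malloc()
 * as its own dedicated block.
 */
#ifdef ALLOCSET_USAGE_EXAMPLES		/* hypothetical guard, illustration only */
static Size
example_rounded_chunk_size(Size request)
{
	Size		chunk_size = 1 << ALLOC_MINBITS;

	/* round up to the next power of 2, starting at the 8-byte minimum */
	while (chunk_size < request)
		chunk_size <<= 1;
	return chunk_size;			/* e.g. 100 -> 128, 8192 -> 8192 */
}
#endif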
86 /*--------------------
87 * The first block allocated for an allocset has size initBlockSize.
88 * Each time we have to allocate another block, we double the block size
89 * (if possible, and without exceeding maxBlockSize), so as to reduce
90 * the bookkeeping load on malloc().
92 * Blocks allocated to hold oversize chunks do not follow this rule, however;
93 * they are just however big they need to be to hold that single chunk.
95 * Also, if a minContextSize is specified, the first block has that size,
96 * and then initBlockSize is used for the next one.
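/*
 * A minimal sketch of the doubling rule described above (hypothetical helper,
 * not part of aset.c): the size requested from malloc() for each successive
 * multi-chunk block doubles until it reaches maxBlockSize, e.g.
 * 8K, 16K, 32K, ... up to 8MB with ALLOCSET_DEFAULT_SIZES.
 */
#ifdef ALLOCSET_USAGE_EXAMPLES		/* hypothetical guard, illustration only */
static Size
example_choose_block_size(Size *nextBlockSize, Size maxBlockSize)
{
	Size		blksize = *nextBlockSize;

	*nextBlockSize <<= 1;		/* double for the following block */
	if (*nextBlockSize > maxBlockSize)
		*nextBlockSize = maxBlockSize;
	return blksize;
}
#endif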
100 #define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
101 #define ALLOC_CHUNKHDRSZ sizeof(struct AllocChunkData)
103 typedef struct AllocBlockData *AllocBlock;	/* forward reference */
104 typedef struct AllocChunkData *AllocChunk;
108 * Aligned pointer which may be a member of an allocation set.
110 typedef void *AllocPointer;
113 * AllocSetContext is our standard implementation of MemoryContext.
115 * Note: header.isReset means there is nothing for AllocSetReset to do.
116 * This is different from the aset being physically empty (empty blocks list)
117 * because we will still have a keeper block. It's also different from the set
118 * being logically empty, because we don't attempt to detect pfree'ing the
 * last remaining chunk.
121 typedef struct AllocSetContext
{
123     MemoryContextData header;	/* Standard memory-context fields */
124     /* Info about storage allocated in this context: */
125     AllocBlock	blocks;			/* head of list of blocks in this set */
126     AllocChunk	freelist[ALLOCSET_NUM_FREELISTS];	/* free chunk lists */
127     /* Allocation parameters for this context: */
128     Size		initBlockSize;	/* initial block size */
129     Size		maxBlockSize;	/* maximum block size */
130     Size		nextBlockSize;	/* next block size to allocate */
131     Size		allocChunkLimit;	/* effective chunk size limit */
132     AllocBlock	keeper;			/* keep this block over resets */
133     /* freelist this context could be put in, or -1 if not a candidate: */
134     int			freeListIndex;	/* index in context_freelists[], or -1 */
} AllocSetContext;

137 typedef AllocSetContext *AllocSet;
141 * An AllocBlock is the unit of memory that is obtained by aset.c
142 * from malloc(). It contains one or more AllocChunks, which are
143 * the units requested by palloc() and freed by pfree(). AllocChunks
144 * cannot be returned to malloc() individually, instead they are put
145 * on freelists by pfree() and re-used by the next palloc() that has
146 * a matching request size.
148 * AllocBlockData is the header data for a block --- the usable space
149 * within the block begins at the next alignment boundary.
151 typedef struct AllocBlockData
{
153     AllocSet	aset;			/* aset that owns this block */
154     AllocBlock	prev;			/* prev block in aset's blocks list, if any */
155     AllocBlock	next;			/* next block in aset's blocks list, if any */
156     char	   *freeptr;		/* start of free space in this block */
157     char	   *endptr;			/* end of space in this block */
} AllocBlockData;
162 * The prefix of each piece of memory in an AllocBlock
164 * Note: to meet the memory context APIs, the payload area of the chunk must
165 * be maxaligned, and the "aset" link must be immediately adjacent to the
166 * payload area (cf. GetMemoryChunkContext). We simplify matters for this
167 * module by requiring sizeof(AllocChunkData) to be maxaligned, and then
168 * we can ensure things work by adding any required alignment padding before
169 * the "aset" field. There is a static assertion below that the alignment
 * is correct.
172 typedef struct AllocChunkData
{
174     /* size is always the size of the usable space in the chunk */
    Size		size;
176 #ifdef MEMORY_CONTEXT_CHECKING
177     /* when debugging memory usage, also store actual requested size */
178     /* this is zero in a free chunk */
    Size		requested_size;

181 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P)
#else
183 #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T + SIZEOF_VOID_P)
184 #endif							/* MEMORY_CONTEXT_CHECKING */

186     /* ensure proper alignment by adding padding if needed */
187 #if (ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
188     char		padding[MAXIMUM_ALIGNOF - ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
#endif

191     /* aset is the owning aset if allocated, or the freelist link if free */
    void	   *aset;
193     /* there must not be any padding to reach a MAXALIGN boundary here! */
} AllocChunkData;
197 * Only the "aset" field should be accessed outside this module.
198 * We keep the rest of an allocated chunk's header marked NOACCESS when using
199 * valgrind. But note that chunk headers that are in a freelist are kept
200 * accessible, for simplicity.
202 #define ALLOCCHUNK_PRIVATE_LEN offsetof(AllocChunkData, aset)
205 * AllocPointerIsValid
206 * True iff pointer is valid allocation pointer.
208 #define AllocPointerIsValid(pointer) PointerIsValid(pointer)
212 * True iff set is valid allocation set.
214 #define AllocSetIsValid(set) PointerIsValid(set)
216 #define AllocPointerGetChunk(ptr) \
217 ((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
218 #define AllocChunkGetPointer(chk) \
219 ((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
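/*
 * Illustrative use of the two macros above (hypothetical helper, not part of
 * aset.c): the chunk header sits immediately before the pointer that palloc()
 * hands out, and its "aset" field identifies the owning context, which is how
 * pfree()/repalloc() find their way back to the right AllocSet.
 */
#ifdef ALLOCSET_USAGE_EXAMPLES		/* hypothetical guard, illustration only */
static AllocSet
example_owning_set(void *pointer)
{
	AllocChunk	chunk = AllocPointerGetChunk(pointer);

	return (AllocSet) chunk->aset;
}
#endif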
222 * Rather than repeatedly creating and deleting memory contexts, we keep some
223 * freed contexts in freelists so that we can hand them out again with little
224 * work. Before putting a context in a freelist, we reset it so that it has
225 * only its initial malloc chunk and no others. To be a candidate for a
226 * freelist, a context must have the same minContextSize/initBlockSize as
227 * other contexts in the list; but its maxBlockSize is irrelevant since that
228 * doesn't affect the size of the initial chunk.
230 * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
231 * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
232 * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
234 * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
235 * hopes of improving locality of reference. But if there get to be too
236 * many contexts in the list, we'd prefer to drop the most-recently-created
237 * contexts in hopes of keeping the process memory map compact.
238 * We approximate that by simply deleting all existing entries when the list
239 * overflows, on the assumption that queries that allocate a lot of contexts
240 * will probably free them in more or less reverse order of allocation.
242 * Contexts in a freelist are chained via their nextchild pointers.
244 #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
246 typedef struct AllocSetFreeList
{
248     int			num_free;		/* current list length */
249     AllocSetContext *first_free;	/* list header */
} AllocSetFreeList;

252 /* context_freelists[0] is for default params, [1] for small params */
253 static AllocSetFreeList context_freelists[2] =
{
    {0, NULL}, {0, NULL}
};
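/*
 * A minimal sketch (hypothetical helpers, not part of aset.c) of the LIFO
 * recycling described above: a deleted context is pushed onto first_free via
 * its header.nextchild link, and the next matching create call pops it back
 * off the front of the list.
 */
#ifdef ALLOCSET_USAGE_EXAMPLES		/* hypothetical guard, illustration only */
static void
example_freelist_push(AllocSetFreeList *freelist, AllocSetContext *set)
{
	set->header.nextchild = (MemoryContext) freelist->first_free;
	freelist->first_free = set;
	freelist->num_free++;
}

static AllocSetContext *
example_freelist_pop(AllocSetFreeList *freelist)
{
	AllocSetContext *set = freelist->first_free;

	if (set != NULL)
	{
		freelist->first_free = (AllocSetContext *) set->header.nextchild;
		freelist->num_free--;
	}
	return set;
}
#endif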
264 * These functions implement the MemoryContext API for AllocSet contexts.
266 static void *AllocSetAlloc(MemoryContext context, Size size);
267 static void AllocSetFree(MemoryContext context, void *pointer);
268 static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
269 static void AllocSetReset(MemoryContext context);
270 static void AllocSetDelete(MemoryContext context);
271 static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
272 static bool AllocSetIsEmpty(MemoryContext context);
273 static void AllocSetStats(MemoryContext context,
274                           MemoryStatsPrintFunc printfunc, void *passthru,
275                           MemoryContextCounters *totals,
276                           bool print_to_stderr);

278 #ifdef MEMORY_CONTEXT_CHECKING
279 static void AllocSetCheck(MemoryContext context);
283 * This is the virtual function table for AllocSet contexts.
285 static const MemoryContextMethods AllocSetMethods = {
291     AllocSetGetChunkSpace,
294 #ifdef MEMORY_CONTEXT_CHECKING
301 * AllocSetFreeIndex -
303 * Depending on the size of an allocation compute which freechunk
304 * list of the alloc set it belongs to. Caller must have verified
305 * that size <= ALLOC_CHUNK_LIMIT.
309 AllocSetFreeIndex(Size size)
313     if (size > (1 << ALLOC_MINBITS))
316 * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
317 * This is the same as
318 * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
320 * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
322 * However, rather than just calling that function, we duplicate the
323 * logic here, allowing an additional optimization. It's reasonable
324 * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
325 * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
326 * the last two bytes.
328 * Yes, this function is enough of a hot-spot to make it worth this
332 #ifdef HAVE__BUILTIN_CLZ
333         idx = 31 - __builtin_clz((uint32) size - 1) - ALLOC_MINBITS + 1;
#else
338         /* Statically assert that we only have a 16-bit input value. */
339         StaticAssertStmt(ALLOC_CHUNK_LIMIT < (1 << 16),
340                          "ALLOC_CHUNK_LIMIT must be less than 64kB");
344         idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
345         idx -= ALLOC_MINBITS - 1;
#endif

348         Assert(idx < ALLOCSET_NUM_FREELISTS);
363 * AllocSetContextCreateInternal
364 * Create a new AllocSet context.
366 * parent: parent context, or NULL if top-level context
367 * name: name of context (must be statically allocated)
368 * minContextSize: minimum context size
369 * initBlockSize: initial allocation block size
370 * maxBlockSize: maximum allocation block size
372 * Most callers should abstract the context size parameters using a macro
373 * such as ALLOCSET_DEFAULT_SIZES.
375 * Note: don't call this directly; go through the wrapper macro
376 * AllocSetContextCreate.
379 AllocSetContextCreateInternal(MemoryContext parent,
390 /* Assert we padded AllocChunkData properly */
391     StaticAssertStmt(ALLOC_CHUNKHDRSZ == MAXALIGN(ALLOC_CHUNKHDRSZ),
392                      "sizeof(AllocChunkData) is not maxaligned");
393     StaticAssertStmt(offsetof(AllocChunkData, aset) + sizeof(MemoryContext) ==
395                      "padding calculation in AllocChunkData is wrong");
398 * First, validate allocation parameters. Once these were regular runtime
399 * tests and elog's, but in practice Asserts seem sufficient because nobody
400 * varies their parameters at runtime. We somewhat arbitrarily enforce a
401 * minimum 1K block size.
403     Assert(initBlockSize == MAXALIGN(initBlockSize) &&
404            initBlockSize >= 1024);
405     Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
406            maxBlockSize >= initBlockSize &&
407            AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
408     Assert(minContextSize == 0 ||
409            (minContextSize == MAXALIGN(minContextSize) &&
410             minContextSize >= 1024 &&
411             minContextSize <= maxBlockSize));
414 * Check whether the parameters match either available freelist. We do
415 * not need to demand a match of maxBlockSize.
417     if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
418         initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
420     else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
421              initBlockSize == ALLOCSET_SMALL_INITSIZE)
427 * If a suitable freelist entry exists, just recycle that context.
429     if (freeListIndex >= 0)
431         AllocSetFreeList *freelist = &context_freelists[freeListIndex];

433         if (freelist->first_free != NULL)
435             /* Remove entry from freelist */
436             set = freelist->first_free;
437             freelist->first_free = (AllocSet) set->header.nextchild;
438             freelist->num_free--;

440             /* Update its maxBlockSize; everything else should be OK */
441             set->maxBlockSize = maxBlockSize;

443             /* Reinitialize its header, installing correct name and parent */
444             MemoryContextCreate((MemoryContext) set,

450             ((MemoryContext) set)->mem_allocated =
451                 set->keeper->endptr - ((char *) set);

453             return (MemoryContext) set;
457 /* Determine size of initial block */
458     firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
459         ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
460     if (minContextSize != 0)
461         firstBlockSize = Max(firstBlockSize, minContextSize);
    else
463         firstBlockSize = Max(firstBlockSize, initBlockSize);
466 * Allocate the initial block. Unlike other aset.c blocks, it starts with
467 * the context header and its block header follows that.
469     set = (AllocSet) malloc(firstBlockSize);
472         if (TopMemoryContext)
473             MemoryContextStats(TopMemoryContext);
475                 (errcode(ERRCODE_OUT_OF_MEMORY),
476 errmsg("out of memory"),
477 errdetail("Failed while creating memory context \"%s\".",
482 * Avoid writing code that can fail between here and MemoryContextCreate;
483 * we'd leak the header/initial block if we ereport in this stretch.
486 /* Fill in the initial block's block header */
487     block = (AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext)));
489     block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
490     block->endptr = ((char *) set) + firstBlockSize;
494 /* Mark unallocated space NOACCESS; leave the block header alone. */
495     VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
497 /* Remember block as part of block list */
499 /* Mark block as not to be released at reset time */
502 /* Finish filling in aset-specific parts of the context header */
503     MemSetAligned(set->freelist, 0, sizeof(set->freelist));

505     set->initBlockSize = initBlockSize;
506     set->maxBlockSize = maxBlockSize;
507     set->nextBlockSize = initBlockSize;
508     set->freeListIndex = freeListIndex;
511 * Compute the allocation chunk size limit for this context. It can't be
512 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
513 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
514 * even a significant fraction of it, should be treated as large chunks
515 * too. For the typical case of maxBlockSize a power of 2, the chunk size
516 * limit will be at most 1/8th maxBlockSize, so that given a stream of
517 * requests that are all the maximum chunk size we will waste at most
518 * 1/8th of the allocated space.
520 * We have to have allocChunkLimit a power of two, because the requested
521 * and actually-allocated sizes of any chunk must be on the same side of
522 * the limit, else we get confused about whether the chunk is "big".
524 * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
526     StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
527                      "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");

529     set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
530     while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
531            (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
532         set->allocChunkLimit >>= 1;
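    /*
     * Worked example of the loop above (sizes assume typical 16- to 32-byte
     * headers): with maxBlockSize = 8MB (ALLOCSET_DEFAULT_MAXSIZE) the limit
     * stays at the full ALLOC_CHUNK_LIMIT of 8K, since 8K plus a chunk header
     * is well under (8MB - block header) / 4.  With maxBlockSize = 32K, 8K
     * plus a header exceeds the roughly 8K budget, so the limit halves once
     * to 4K and then stops.
     */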
534 /* Finally, do the type-independent part of context creation */
535     MemoryContextCreate((MemoryContext) set,

541     ((MemoryContext) set)->mem_allocated = firstBlockSize;

543     return (MemoryContext) set;
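/*
 * Typical call pattern for reference (a sketch; the context name and the use
 * of ALLOCSET_DEFAULT_SIZES are just examples): callers go through the
 * AllocSetContextCreate() wrapper macro in memutils.h rather than calling
 * AllocSetContextCreateInternal() directly.
 */
#ifdef ALLOCSET_USAGE_EXAMPLES		/* hypothetical guard, illustration only */
static MemoryContext
example_create_context(MemoryContext parent)
{
	return AllocSetContextCreate(parent,
								 "example context",
								 ALLOCSET_DEFAULT_SIZES);
}
#endif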
548 * Frees all memory which is allocated in the given set.
550 * Actually, this routine has some discretion about what to do.
551 * It should mark all allocated chunks freed, but it need not necessarily
552 * give back all the resources the set owns. Our actual implementation is
553 * that we give back all but the "keeper" block (which we must keep, since
554 * it shares a malloc chunk with the context header). In this way, we don't
555 * thrash malloc() when a context is repeatedly reset after small allocations,
556 * which is typical behavior for per-tuple contexts.
559 AllocSetReset(MemoryContext context)
561     AllocSet	set = (AllocSet) context;
563     Size		keepersize PG_USED_FOR_ASSERTS_ONLY
564         = set->keeper->endptr - ((char *) set);

566     AssertArg(AllocSetIsValid(set));

568 #ifdef MEMORY_CONTEXT_CHECKING
569     /* Check for corruption and leaks before freeing */
570     AllocSetCheck(context);
573 /* Clear chunk freelists */
574     MemSetAligned(set->freelist, 0, sizeof(set->freelist));

578     /* New blocks list will be just the keeper block */
579     set->blocks = set->keeper;

581     while (block != NULL)
583         AllocBlock	next = block->next;
585         if (block == set->keeper)
587             /* Reset the block, but don't return it to malloc */
588             char	   *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;

590 #ifdef CLOBBER_FREED_MEMORY
591             wipe_mem(datastart, block->freeptr - datastart);
#else
593             /* wipe_mem() would have done this */
594             VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif
596             block->freeptr = datastart;
602 /* Normal case, release the block */
603             context->mem_allocated -= block->endptr - ((char *) block);

605 #ifdef CLOBBER_FREED_MEMORY
606             wipe_mem(block, block->freeptr - ((char *) block));
#endif
            free(block);

613     Assert(context->mem_allocated == keepersize);
615 /* Reset block size allocation sequence, too */
616     set->nextBlockSize = set->initBlockSize;
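/*
 * Usage sketch (not part of aset.c) of the reset-instead-of-free pattern this
 * enables, as used for per-tuple contexts; the function and variable names
 * here are illustrative only.
 */
#ifdef ALLOCSET_USAGE_EXAMPLES		/* hypothetical guard, illustration only */
static void
example_per_tuple_work(MemoryContext per_tuple_context, int ntuples)
{
	for (int i = 0; i < ntuples; i++)
	{
		MemoryContext oldcontext = MemoryContextSwitchTo(per_tuple_context);
		char	   *scratch = palloc(128);	/* cheap: reuses the keeper block */

		scratch[0] = '\0';		/* ... do per-tuple work ... */
		MemoryContextSwitchTo(oldcontext);

		/* releases everything but the keeper block; no malloc/free churn */
		MemoryContextReset(per_tuple_context);
	}
}
#endif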
621 * Frees all memory which is allocated in the given set,
622 * in preparation for deletion of the set.
624 * Unlike AllocSetReset, this *must* free all resources of the set.
627 AllocSetDelete(MemoryContext context)
629     AllocSet	set = (AllocSet) context;
630     AllocBlock	block = set->blocks;
631     Size		keepersize PG_USED_FOR_ASSERTS_ONLY
632         = set->keeper->endptr - ((char *) set);

634     AssertArg(AllocSetIsValid(set));
636 #ifdef MEMORY_CONTEXT_CHECKING
637 /* Check for corruption and leaks before freeing */
638     AllocSetCheck(context);
642 * If the context is a candidate for a freelist, put it into that freelist
643 * instead of destroying it.
645     if (set->freeListIndex >= 0)
647         AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
650 * Reset the context, if it needs it, so that we aren't hanging on to
651 * more than the initial malloc chunk.
653         if (!context->isReset)
654             MemoryContextResetOnly(context);
657 * If the freelist is full, just discard what's already in it. See
658 * comments with context_freelists[].
660         if (freelist->num_free >= MAX_FREE_CONTEXTS)
662             while (freelist->first_free != NULL)
664                 AllocSetContext *oldset = freelist->first_free;

666                 freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
667                 freelist->num_free--;

669                 /* All that remains is to free the header/initial block */

672             Assert(freelist->num_free == 0);
675 /* Now add the just-deleted context to the freelist. */
676         set->header.nextchild = (MemoryContext) freelist->first_free;
677         freelist->first_free = set;
678         freelist->num_free++;
683 /* Free all blocks, except the keeper which is part of context header */
684     while (block != NULL)
686         AllocBlock	next = block->next;
688         if (block != set->keeper)
689             context->mem_allocated -= block->endptr - ((char *) block);
691 #ifdef CLOBBER_FREED_MEMORY
692         wipe_mem(block, block->freeptr - ((char *) block));
#endif
695         if (block != set->keeper)
            free(block);

701     Assert(context->mem_allocated == keepersize);
703 /* Finally, free the context header, including the keeper block */
709 * Returns pointer to allocated memory of given size or NULL if
710 * request could not be completed; memory is added to the set.
712 * No request may exceed:
713 * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
714 * All callers use a much-lower limit.
716 * Note: when using valgrind, it doesn't matter how the returned allocation
717 * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
718 * return space that is marked NOACCESS - AllocSetRealloc has to beware!
721 AllocSetAlloc(MemoryContext context, Size size)
723     AllocSet	set = (AllocSet) context;

730     AssertArg(AllocSetIsValid(set));
733 * If requested size exceeds maximum for chunks, allocate an entire block
736     if (size > set->allocChunkLimit)
738         chunk_size = MAXALIGN(size);
739         blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
740         block = (AllocBlock) malloc(blksize);

744         context->mem_allocated += blksize;

747         block->freeptr = block->endptr = ((char *) block) + blksize;

749         chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
751         chunk->size = chunk_size;
752 #ifdef MEMORY_CONTEXT_CHECKING
753         chunk->requested_size = size;
754         /* set mark to catch clobber of "unused" space */
755         if (size < chunk_size)
756             set_sentinel(AllocChunkGetPointer(chunk), size);
758 #ifdef RANDOMIZE_ALLOCATED_MEMORY
759 /* fill the allocated space with junk */
760         randomize_mem((char *) AllocChunkGetPointer(chunk), size);
764 * Stick the new block underneath the active allocation block, if any,
765 * so that we don't lose the use of the space remaining therein.
767         if (set->blocks != NULL)
769             block->prev = set->blocks;
770             block->next = set->blocks->next;
            if (block->next)
772                 block->next->prev = block;
773             set->blocks->next = block;
782 /* Ensure any padding bytes are marked NOACCESS. */
783         VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,

786         /* Disallow external access to private part of chunk header. */
787         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

789         return AllocChunkGetPointer(chunk);
793 * Request is small enough to be treated as a chunk. Look in the
794 * corresponding free list to see if there is a free chunk we could reuse.
795 * If one is found, remove it from the free list, make it again a member
796 * of the alloc set and return its data address.
798     fidx = AllocSetFreeIndex(size);
799     chunk = set->freelist[fidx];
802         Assert(chunk->size >= size);

804         set->freelist[fidx] = (AllocChunk) chunk->aset;

806         chunk->aset = (void *) set;
808 #ifdef MEMORY_CONTEXT_CHECKING
809         chunk->requested_size = size;
810         /* set mark to catch clobber of "unused" space */
811         if (size < chunk->size)
812             set_sentinel(AllocChunkGetPointer(chunk), size);
814 #ifdef RANDOMIZE_ALLOCATED_MEMORY
815 /* fill the allocated space with junk */
816         randomize_mem((char *) AllocChunkGetPointer(chunk), size);
819 /* Ensure any padding bytes are marked NOACCESS. */
820         VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
823 /* Disallow external access to private part of chunk header. */
824         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

826         return AllocChunkGetPointer(chunk);
830 * Choose the actual chunk size to allocate.
832     chunk_size = (1 << ALLOC_MINBITS) << fidx;
833     Assert(chunk_size >= size);
836 * If there is enough room in the active allocation block, we will put the
837 * chunk into that block. Else must start a new one.
839     if ((block = set->blocks) != NULL)
841         Size		availspace = block->endptr - block->freeptr;

843         if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
846 * The existing active (top) block does not have enough room for
847 * the requested allocation, but it might still have a useful
848 * amount of space in it. Once we push it down in the block list,
849 * we'll never try to allocate more space from it. So, before we
850 * do that, carve up its free space into chunks that we can put on
851 * the set's freelists.
853 * Because we can only get here when there's less than
854 * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
855 * more than ALLOCSET_NUM_FREELISTS-1 times.
857             while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
859                 Size		availchunk = availspace - ALLOC_CHUNKHDRSZ;
860                 int			a_fidx = AllocSetFreeIndex(availchunk);

863 * In most cases, we'll get back the index of the next larger
864 * freelist than the one we need to put this chunk on. The
865 * exception is when availchunk is exactly a power of 2.
867                 if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
                {
                    a_fidx--;
871                     availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
                }

874                 chunk = (AllocChunk) (block->freeptr);

876                 /* Prepare to initialize the chunk header. */
877                 VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

879                 block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
880                 availspace -= (availchunk + ALLOC_CHUNKHDRSZ);

882                 chunk->size = availchunk;
883 #ifdef MEMORY_CONTEXT_CHECKING
884                 chunk->requested_size = 0;	/* mark it free */

886                 chunk->aset = (void *) set->freelist[a_fidx];
887                 set->freelist[a_fidx] = chunk;
890 /* Mark that we need to create a new block */
896 * Time to create a new regular (multi-chunk) block?
903 * The first such block has size initBlockSize, and we double the
904 * space in each succeeding block, but not more than maxBlockSize.
906         blksize = set->nextBlockSize;
907         set->nextBlockSize <<= 1;
908         if (set->nextBlockSize > set->maxBlockSize)
909             set->nextBlockSize = set->maxBlockSize;
912 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
913 * space... but try to keep it a power of 2.
915         required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
916         while (blksize < required_size)
            blksize <<= 1;
919 /* Try to allocate it */
920         block = (AllocBlock) malloc(blksize);

923 * We could be asking for pretty big blocks here, so cope if malloc
924 * fails. But give up if there's less than 1 MB or so available...
926         while (block == NULL && blksize > 1024 * 1024)
            blksize >>= 1;
929             if (blksize < required_size)
                break;
931             block = (AllocBlock) malloc(blksize);
937         context->mem_allocated += blksize;

940         block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
941         block->endptr = ((char *) block) + blksize;
943 /* Mark unallocated space NOACCESS. */
944         VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
945                                    blksize - ALLOC_BLOCKHDRSZ);
948         block->next = set->blocks;
        if (block->next)
950             block->next->prev = block;
955 * OK, do the allocation
957     chunk = (AllocChunk) (block->freeptr);

959     /* Prepare to initialize the chunk header. */
960     VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

962     block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
963     Assert(block->freeptr <= block->endptr);

965     chunk->aset = (void *) set;
966     chunk->size = chunk_size;
967 #ifdef MEMORY_CONTEXT_CHECKING
968     chunk->requested_size = size;
969     /* set mark to catch clobber of "unused" space */
970     if (size < chunk->size)
971         set_sentinel(AllocChunkGetPointer(chunk), size);
973 #ifdef RANDOMIZE_ALLOCATED_MEMORY
974 /* fill the allocated space with junk */
975     randomize_mem((char *) AllocChunkGetPointer(chunk), size);
978 /* Ensure any padding bytes are marked NOACCESS. */
979     VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
982 /* Disallow external access to private part of chunk header. */
983     VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

985     return AllocChunkGetPointer(chunk);
990 * Frees allocated memory; memory is removed from the set.
993 AllocSetFree(MemoryContext context, void *pointer)
995     AllocSet	set = (AllocSet) context;
996     AllocChunk	chunk = AllocPointerGetChunk(pointer);

998     /* Allow access to private part of chunk header. */
999     VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
1001 #ifdef MEMORY_CONTEXT_CHECKING
1002 /* Test for someone scribbling on unused space in chunk */
1003     if (chunk->requested_size < chunk->size)
1004         if (!sentinel_ok(pointer, chunk->requested_size))
1005             elog(WARNING, "detected write past chunk end in %s %p",
1006                  set->header.name, chunk);
1009     if (chunk->size > set->allocChunkLimit)
1012 * Big chunks are certain to have been allocated as single-chunk
1013 * blocks. Just unlink that block and return it to malloc().
1015         AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
1018 * Try to verify that we have a sane block pointer: it should
1019 * reference the correct aset, and freeptr and endptr should point
1020 * just past the chunk.
1022         if (block->aset != set ||
1023             block->freeptr != block->endptr ||
1024             block->freeptr != ((char *) block) +
1025             (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1026             elog(ERROR, "could not find block containing chunk %p", chunk);
1028 /* OK, remove block from aset's list and free it */
        if (block->prev)
1030             block->prev->next = block->next;
        else
1032             set->blocks = block->next;
        if (block->next)
1034             block->next->prev = block->prev;

1036         context->mem_allocated -= block->endptr - ((char *) block);

1038 #ifdef CLOBBER_FREED_MEMORY
1039         wipe_mem(block, block->freeptr - ((char *) block));
#endif
        free(block);
1045 /* Normal case, put the chunk into appropriate freelist */
1046         int			fidx = AllocSetFreeIndex(chunk->size);

1048         chunk->aset = (void *) set->freelist[fidx];
1050 #ifdef CLOBBER_FREED_MEMORY
1051         wipe_mem(pointer, chunk->size);
1054 #ifdef MEMORY_CONTEXT_CHECKING
1055 /* Reset requested_size to 0 in chunks that are on freelist */
1056         chunk->requested_size = 0;

1058         set->freelist[fidx] = chunk;
1064 * Returns new pointer to allocated memory of given size or NULL if
1065 * request could not be completed; this memory is added to the set.
1066 * Memory associated with given pointer is copied into the new memory,
1067 * and the old memory is freed.
1069 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1070 * makes our Valgrind client requests less-precise, hazarding false negatives.
1071 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1075 AllocSetRealloc(MemoryContext context, void *pointer, Size size)
1077     AllocSet	set = (AllocSet) context;
1078     AllocChunk	chunk = AllocPointerGetChunk(pointer);

1081     /* Allow access to private part of chunk header. */
1082     VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
1084     oldsize = chunk->size;
1086 #ifdef MEMORY_CONTEXT_CHECKING
1087 /* Test for someone scribbling on unused space in chunk */
1088     if (chunk->requested_size < oldsize)
1089         if (!sentinel_ok(pointer, chunk->requested_size))
1090             elog(WARNING, "detected write past chunk end in %s %p",
1091                  set->header.name, chunk);
1094     if (oldsize > set->allocChunkLimit)
1097 * The chunk must have been allocated as a single-chunk block. Use
1098 * realloc() to make the containing block bigger, or smaller, with
1099 * minimum space wastage.
1101         AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
1107 * Try to verify that we have a sane block pointer: it should
1108 * reference the correct aset, and freeptr and endptr should point
1109 * just past the chunk.
1111         if (block->aset != set ||
1112             block->freeptr != block->endptr ||
1113             block->freeptr != ((char *) block) +
1114             (oldsize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1115             elog(ERROR, "could not find block containing chunk %p", chunk);
1118 * Even if the new request is less than set->allocChunkLimit, we stick
1119 * with the single-chunk block approach. Therefore we need
1120 * chunk->size to be bigger than set->allocChunkLimit, so we don't get
1121 * confused about the chunk's status in future calls.
1123         chksize = Max(size, set->allocChunkLimit + 1);
1124         chksize = MAXALIGN(chksize);

1126         /* Do the realloc */
1127         blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1128         oldblksize = block->endptr - ((char *) block);

1130         block = (AllocBlock) realloc(block, blksize);
1133 /* Disallow external access to private part of chunk header. */
1134             VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
1138 /* updated separately, not to underflow when (oldblksize > blksize) */
1139         context->mem_allocated -= oldblksize;
1140         context->mem_allocated += blksize;
1142         block->freeptr = block->endptr = ((char *) block) + blksize;
1144 /* Update pointers since block has likely been moved */
1145         chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ;
1146         pointer = AllocChunkGetPointer(chunk);
        if (block->prev)
1148             block->prev->next = block;
        else
1150             set->blocks = block;
        if (block->next)
1152             block->next->prev = block;
1153         chunk->size = chksize;
1155 #ifdef MEMORY_CONTEXT_CHECKING
1156 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1157 /* We can only fill the extra space if we know the prior request */
1158         if (size > chunk->requested_size)
1159             randomize_mem((char *) pointer + chunk->requested_size,
1160                           size - chunk->requested_size);
1164 * realloc() (or randomize_mem()) will have left any newly-allocated
1165 * part UNDEFINED, but we may need to adjust trailing bytes from the
1169         if (oldsize > chunk->requested_size)
1170             VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1171                                         oldsize - chunk->requested_size);
1174         chunk->requested_size = size;
1176 /* set mark to catch clobber of "unused" space */
1177         if (size < chunk->size)
1178             set_sentinel(pointer, size);
1179 #else /* !MEMORY_CONTEXT_CHECKING */
1182 * We don't know how much of the old chunk size was the actual
1183 * allocation; it could have been as small as one byte. We have to be
1184 * conservative and just mark the entire old portion DEFINED.
1186         VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1189 /* Ensure any padding bytes are marked NOACCESS. */
1190         VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1192 /* Disallow external access to private part of chunk header. */
1193         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
1199 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1200 * allocated area already is >= the new size. (In particular, we will
1201 * fall out here if the requested size is a decrease.)
1203     else if (oldsize >= size)
1205 #ifdef MEMORY_CONTEXT_CHECKING
1206         Size		oldrequest = chunk->requested_size;
1208 #ifdef RANDOMIZE_ALLOCATED_MEMORY
1209 /* We can only fill the extra space if we know the prior request */
1210         if (size > oldrequest)
1211             randomize_mem((char *) pointer + oldrequest,

1215         chunk->requested_size = size;
1218 * If this is an increase, mark any newly-available part UNDEFINED.
1219 * Otherwise, mark the obsolete part NOACCESS.
1221         if (size > oldrequest)
1222             VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,

1225             VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,

1228         /* set mark to catch clobber of "unused" space */
1230             set_sentinel(pointer, size);
1231 #else /* !MEMORY_CONTEXT_CHECKING */
1234 * We don't have the information to determine whether we're growing
1235 * the old request or shrinking it, so we conservatively mark the
1236 * entire new allocation DEFINED.
1238         VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
1239         VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1242 /* Disallow external access to private part of chunk header. */
1243         VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
1250 * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1251 * allocate a new chunk and copy the data. Since we know the existing
1252 * data isn't huge, this won't involve any great memcpy expense, so
1253 * it's not worth being smarter. (At one time we tried to avoid
1254 * memcpy when it was possible to enlarge the chunk in-place, but that
1255 * turns out to misbehave unpleasantly for repeated cycles of
1256 * palloc/repalloc/pfree: the eventually freed chunks go into the
1257 * wrong freelist for the next initial palloc request, and so we leak
1258 * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1260         AllocPointer newPointer;
1262 /* allocate new chunk */
1263         newPointer = AllocSetAlloc((MemoryContext) set, size);
1265 /* leave immediately if request was not completed */
1266         if (newPointer == NULL)
1268 /* Disallow external access to private part of chunk header. */
1269             VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
1274 * AllocSetAlloc() may have returned a region that is still NOACCESS.
1275 * Change it to UNDEFINED for the moment; memcpy() will then transfer
1276 * definedness from the old allocation to the new. If we know the old
1277 * allocation, copy just that much. Otherwise, make the entire old
1278 * chunk defined to avoid errors as we copy the currently-NOACCESS
1281         VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1282 #ifdef MEMORY_CONTEXT_CHECKING
1283         oldsize = chunk->requested_size;
#else
1285         VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif
1288 /* transfer existing data (certain to fit) */
1289         memcpy(newPointer, pointer, oldsize);
1291 /* free old chunk */
1292         AllocSetFree((MemoryContext) set, pointer);
1299 * AllocSetGetChunkSpace
1300 * Given a currently-allocated chunk, determine the total space
1301 * it occupies (including all memory-allocation overhead).
1304 AllocSetGetChunkSpace(MemoryContext context, void *pointer)
1306     AllocChunk	chunk = AllocPointerGetChunk(pointer);
1309     VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
1310     result = chunk->size + ALLOC_CHUNKHDRSZ;
1311     VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
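    /*
     * Worked example: palloc(100) in a context with the default parameters
     * occupies a 128-byte chunk, so this reports 128 + ALLOC_CHUNKHDRSZ
     * (the header is typically 16 bytes without MEMORY_CONTEXT_CHECKING).
     */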
1317 * Is an allocset empty of any allocated space?
1320 AllocSetIsEmpty(MemoryContext context)
1323 * For now, we say "empty" only if the context is new or just reset. We
1324 * could examine the freelists to determine if all space has been freed,
1325 * but it's not really worth the trouble for present uses of this
1328     if (context->isReset)
1335 * Compute stats about memory consumption of an allocset.
1337 * printfunc: if not NULL, pass a human-readable stats string to this.
1338 * passthru: pass this pointer through to printfunc.
1339 * totals: if not NULL, add stats about this context into *totals.
1340 * print_to_stderr: print stats to stderr if true, elog otherwise.
1343 AllocSetStats(MemoryContext context,
1344               MemoryStatsPrintFunc printfunc, void *passthru,
1345               MemoryContextCounters *totals, bool print_to_stderr)
1347     AllocSet	set = (AllocSet) context;
1349     Size		freechunks = 0;
1355 /* Include context header in totalspace */
1356     totalspace = MAXALIGN(sizeof(AllocSetContext));
1358     for (block = set->blocks; block != NULL; block = block->next)
1361         totalspace += block->endptr - ((char *) block);
1362         freespace += block->endptr - block->freeptr;
1364     for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1368         for (chunk = set->freelist[fidx]; chunk != NULL;
1369              chunk = (AllocChunk) chunk->aset)
1372             freespace += chunk->size + ALLOC_CHUNKHDRSZ;
1378         char		stats_string[200];

1380         snprintf(stats_string, sizeof(stats_string),
1381                  "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1382                  totalspace, nblocks, freespace, freechunks,
1383                  totalspace - freespace);
1384         printfunc(context, passthru, stats_string, print_to_stderr);
1389         totals->nblocks += nblocks;
1390         totals->freechunks += freechunks;
1391         totals->totalspace += totalspace;
1392         totals->freespace += freespace;
1397 #ifdef MEMORY_CONTEXT_CHECKING
1401 * Walk through chunks and check consistency of memory.
1403 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1404 * find yourself in an infinite loop when trouble occurs, because this
1405 * routine will be entered again when elog cleanup tries to release memory!
1408 AllocSetCheck(MemoryContext context)
1410     AllocSet	set = (AllocSet) context;
1411     const char *name = set->header.name;
1412     AllocBlock	prevblock;
1414     Size		total_allocated = 0;
1416     for (prevblock = NULL, block = set->blocks;
         block != NULL;
1418          prevblock = block, block = block->next)
1420         char	   *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1421         long		blk_used = block->freeptr - bpoz;
1425         if (set->keeper == block)
1426             total_allocated += block->endptr - ((char *) set);
        else
1428             total_allocated += block->endptr - ((char *) block);
1431 * Empty block - empty can be keeper-block only
1435             if (set->keeper != block)
1436                 elog(WARNING, "problem in alloc set %s: empty block %p",
1441 * Check block header fields
1443         if (block->aset != set ||
1444             block->prev != prevblock ||
1445             block->freeptr < bpoz ||
1446             block->freeptr > block->endptr)
1447             elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1453         while (bpoz < block->freeptr)
1455             AllocChunk	chunk = (AllocChunk) bpoz;
1459 /* Allow access to private part of chunk header. */
1460             VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);

1462             chsize = chunk->size;	/* aligned chunk size */
1463             dsize = chunk->requested_size;	/* real data */
1469                 elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1470                      name, chunk, block);
1471             if (chsize < (1 << ALLOC_MINBITS))
1472                 elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1473                      name, chsize, chunk, block);
1475 /* single-chunk block? */
1476             if (chsize > set->allocChunkLimit &&
1477                 chsize + ALLOC_CHUNKHDRSZ != blk_used)
1478                 elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1479                      name, chunk, block);
1482 * If chunk is allocated, check for correct aset pointer. (If it's
1483 * free, the aset is the freelist pointer, which we can't check as
1484 * easily...) Note this is an incomplete test, since palloc(0)
1485 * produces an allocated chunk with requested_size == 0.
1487             if (dsize > 0 && chunk->aset != (void *) set)
1488                 elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
1489                      name, block, chunk);
1492 * Check for overwrite of padding space in an allocated chunk.
1494             if (chunk->aset == (void *) set && dsize < chsize &&
1495                 !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
1496                 elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1497                      name, block, chunk);
1500 * If chunk is allocated, disallow external access to private part
1503             if (chunk->aset == (void *) set)
1504                 VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

1509             bpoz += ALLOC_CHUNKHDRSZ + chsize;
1512         if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1513             elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",

1517     Assert(total_allocated == context->mem_allocated);
1520 #endif /* MEMORY_CONTEXT_CHECKING */