exec.library: add ability to turn off MemHeaderAllocatorCtx support
[AROS.git] / rom / exec / memory.c
blob 1b58d8a060601d329df1b83c099aee9cb6638247
/*
    Copyright © 1995-2012, The AROS Development Team. All rights reserved.
    $Id$
*/
#include <aros/debug.h>
#include <exec/rawfmt.h>
#include <proto/kernel.h>

#include "exec_intern.h"
#include "exec_util.h"
#include "etask.h"
#include "memory.h"
#include "mungwall.h"
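
/*
 * DMH() wraps verbose MemHeader-level debug output. It expands to nothing,
 * so the bug() calls inside it are compiled out.
 */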
#define DMH(x)
/*
 * Find the MemHeader to which an address belongs.
 * This function is legal to be called in supervisor mode (we use TypeOfMem()
 * in order to validate addresses in tons of places), hence the mode checks below.
 */
struct MemHeader *FindMem(APTR address, struct ExecBase *SysBase)
{
    int usermode = (KernelBase != NULL) && (KrnIsSuper() == 0);
    struct MemHeader *mh;

    /* Nobody should change the memory list now. */
    if (usermode) MEM_LOCK_SHARED;

    /* Follow the list of MemHeaders */
    mh = (struct MemHeader *)SysBase->MemList.lh_Head;

    while (mh->mh_Node.ln_Succ != NULL)
    {
        /* Check if this MemHeader fits */
        if (address >= mh->mh_Lower && address < mh->mh_Upper)
        {
            /* Yes. Return it. */
            if (usermode) MEM_UNLOCK;
            return mh;
        }

        /* Go to the next MemHeader */
        mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
    }

    if (usermode) MEM_UNLOCK;
    return NULL;
}
char *FormatMMContext(char *buffer, struct MMContext *ctx, struct ExecBase *SysBase)
{
    if (ctx->addr)
        buffer = NewRawDoFmt("In %s, block at 0x%p, size %lu", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->func, ctx->addr, ctx->size) - 1;
    else
        buffer = NewRawDoFmt("In %s, size %lu", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->func, ctx->size) - 1;

    if (ctx->mc)
    {
        buffer = NewRawDoFmt("\nCorrupted MemChunk 0x%p (next 0x%p, size %lu)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mc, ctx->mc->mc_Next, ctx->mc->mc_Bytes) - 1;

        if (ctx->mcPrev)
            buffer = NewRawDoFmt("\nPrevious MemChunk 0x%p (next 0x%p, size %lu)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mcPrev, ctx->mcPrev->mc_Next, ctx->mcPrev->mc_Bytes) - 1;
    }

    /* Print MemHeader details */
    buffer = NewRawDoFmt("\nMemHeader 0x%p (0x%p - 0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh, ctx->mh->mh_Lower, ctx->mh->mh_Upper) - 1;
    if ((IPTR)ctx->mh->mh_First & (MEMCHUNK_TOTAL - 1))
        buffer = NewRawDoFmt("\n- Unaligned first chunk address (0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_First) - 1;

    if (ctx->mh->mh_Free & (MEMCHUNK_TOTAL - 1))
        buffer = NewRawDoFmt("\n- Unaligned free space count (0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_Free) - 1;

    if (ctx->mh->mh_First)
    {
        if ((APTR)ctx->mh->mh_First < ctx->mh->mh_Lower)
            buffer = NewRawDoFmt("\n- First chunk (0x%p) below lower address", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_First) - 1;

        if ((APTR)ctx->mh->mh_First + ctx->mh->mh_Free > ctx->mh->mh_Upper)
            buffer = NewRawDoFmt("\n- Free space count too large (%lu, first chunk 0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_Free, ctx->mh->mh_First) - 1;
    }

    return buffer;
}
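
/*
 * The allocator context below (a small MemChunk index per MemHeader) is
 * optional: defining NO_ALLOCATOR_CONTEXT compiles it out, and the stubs
 * in the #ifdef branch keep all call sites working unchanged.
 */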
#ifdef NO_ALLOCATOR_CONTEXT

struct MemHeaderAllocatorCtx *mhac_GetSysCtx(struct MemHeader *mh)
{
    return NULL;
}

#define mhac_MemChunkClaimed(a, b)
#define mhac_MemChunkCreated(a, b, c)       { (void)b; }
#define mhac_GetBetterPrevMemChunk(a, b, c) (a)

#else
/* Allocator optimization support */

/*
 * The array below contains pointers to the chunk directly preceding the first
 * chunk of at least size N, where N = 1 << (FIRSTPOTBIT + i * POTSTEP) and i
 * is the index into the array. "First" means the MemChunk with the lowest address.
 *
 * Each entry locates the place where the search should start, not necessarily
 * where the allocation will happen.
 */
#define FIRSTPOTBIT           (5)
#define FIRSTPOT              (1 << FIRSTPOTBIT)
#define POTSTEP               (2) /* Bit distance between levels */
#define ALLOCATORCTXINDEXSIZE (8) /* Number of levels in the index */
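
/*
 * With these values the ALLOCATORCTXINDEXSIZE levels index chunks of at
 * least 32, 128, 512, 2048, 8192, 32768, 131072 and 524288 bytes (the level
 * size starts at FIRSTPOT and is shifted left by POTSTEP per level).
 */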
struct MemHeaderAllocatorCtx
{
    struct MemHeader *mhac_MemHeader;
    struct MemChunk  *mhac_PrevChunks[ALLOCATORCTXINDEXSIZE];
};

struct MemHeaderAllocatorCtx test[25];
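
/*
 * Static table of contexts handed out by mhac_GetSysCtx(), one per system
 * MemHeader. The capacity of 25 entries is a fixed assumption; entries are
 * never released once bound to a MemHeader.
 */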
struct MemHeaderAllocatorCtx *mhac_GetSysCtx(struct MemHeader *mh)
{
    struct MemHeaderAllocatorCtx *mhi = NULL;
    LONG i;

    for (i = 0; i < 25; i++)
    {
        if (test[i].mhac_MemHeader == NULL && mhi == NULL)
            mhi = &test[i]; /* Grab an empty slot in case mh is not indexed yet */

        if (test[i].mhac_MemHeader == mh)
        {
            mhi = &test[i]; /* Found! */
            break;
        }
    }

    /* The table may be full; callers treat a NULL context as "no index" */
    if (mhi)
        mhi->mhac_MemHeader = mh;

    return mhi;
}
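
#if 0
/*
 * Illustrative sketch (not part of the build): dump the per-level index of
 * an allocator context via the debug bug() channel. Useful when tracing how
 * mhac_MemChunkCreated()/mhac_MemChunkClaimed() maintain the index.
 */
static void mhac_DumpIndex(struct MemHeaderAllocatorCtx *mhac)
{
    LONG i;
    IPTR v = FIRSTPOT;

    for (i = 0; i < ALLOCATORCTXINDEXSIZE; i++, v = v << POTSTEP)
        bug("[MM] level %ld (chunks >= %lu bytes): prev chunk 0x%p\n",
            i, v, mhac->mhac_PrevChunks[i]);
}
#endif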
void mhac_MemChunkClaimed(struct MemChunk *mc, struct MemHeaderAllocatorCtx *mhac)
{
    LONG i;

    if (!mhac)
        return;

    for (i = 0; i < ALLOCATORCTXINDEXSIZE; i++)
    {
        if (mhac->mhac_PrevChunks[i] != NULL &&
            (mhac->mhac_PrevChunks[i] == mc || mhac->mhac_PrevChunks[i]->mc_Next == mc))
        {
            mhac->mhac_PrevChunks[i] = NULL;
        }
    }
}
void mhac_MemChunkCreated(struct MemChunk *mc, struct MemChunk *mcprev, struct MemHeaderAllocatorCtx *mhac)
{
    LONG i, v = FIRSTPOT;

    if (mc->mc_Bytes < FIRSTPOT) /* Chunk too small for the index */
        return;

    if (!mhac)
        return;

    for (i = 0; i < ALLOCATORCTXINDEXSIZE; i++, v = v << POTSTEP)
    {
        if (mc->mc_Bytes < v)
            break; /* Chunk is smaller than the level at i. Stop. */

        /* Record mcprev if this level is empty, or if the new chunk has a
           lower address than the chunk currently indexed at this level. */
        if (mhac->mhac_PrevChunks[i] == NULL ||
            (mhac->mhac_PrevChunks[i] != NULL && mhac->mhac_PrevChunks[i]->mc_Next > mc))
        {
            mhac->mhac_PrevChunks[i] = mcprev;
        }
    }
}
/* General idea:
 * The function returns a pointer to the chunk directly preceding a chunk that
 * allows a big-enough chunk to be located faster. It never returns NULL.
 * Current implementation:
 * The function returns the pointer recorded at the deepest index level whose
 * size does not exceed the requested size, i.e. the predecessor of the first
 * (lowest-address) chunk of at least that level's size.
 */
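/*
 * Example: for size = 1000 the loop below accepts the 32-, 128- and 512-byte
 * levels and stops at 2048 (2048 > 1000), so the search starts at the chunk
 * recorded for the deepest populated level among those three.
 */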
struct MemChunk *mhac_GetBetterPrevMemChunk(struct MemChunk *prev, IPTR size, struct MemHeaderAllocatorCtx *mhac)
{
    struct MemChunk *_return = prev;

    if (size < FIRSTPOT)
        return _return; /* Allocation too small for the index */

    if (mhac)
    {
        LONG i, v = FIRSTPOT;

        for (i = 0; i < ALLOCATORCTXINDEXSIZE; i++, v = v << POTSTEP)
        {
            if (size < v)
                return _return; /* This level is bigger than the requested size */

            if (mhac->mhac_PrevChunks[i] != NULL)
                _return = mhac->mhac_PrevChunks[i];
        }
    }

    return _return;
}

#endif
#ifdef NO_CONSISTENCY_CHECKS

#define validateHeader(mh, op, addr, size, tp, SysBase)          TRUE
#define validateChunk(mc, prev, mh, op, addr, size, tp, SysBase) TRUE

#else
static ULONG memAlerts[] =
{
    AT_DeadEnd|AN_MemoryInsane, /* MM_ALLOC   */
    AT_DeadEnd|AN_MemCorrupt,   /* MM_FREE    */
    AN_FreeTwice                /* MM_OVERLAP */
};
/*
 * MemHeader validation routine. The rules are:
 *
 * 1. Both mh_First and mh_Free must be MEMCHUNK_TOTAL-aligned.
 * 2. Free space (if present) must completely fit between mh_Lower and mh_Upper.
 *
 * We intentionally don't check the header's own location. We assume that in the
 * future we'll be able to put MEMF_CHIP headers inside MEMF_FAST memory, for speedup.
 */
static BOOL validateHeader(struct MemHeader *mh, UBYTE op, APTR addr, IPTR size, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    if (((IPTR)mh->mh_First & (MEMCHUNK_TOTAL - 1)) || (mh->mh_Free & (MEMCHUNK_TOTAL - 1)) ||          /* 1 */
        (mh->mh_First &&
         (((APTR)mh->mh_First < mh->mh_Lower) || ((APTR)mh->mh_First + mh->mh_Free > mh->mh_Upper))))   /* 2 */
    {
        if (tp)
        {
            /* TraceLocation is not supplied by PrepareExecBase(). Fail silently. */
            struct MMContext alertData;

            alertData.mh     = mh;
            alertData.mc     = NULL;
            alertData.mcPrev = NULL;
            alertData.func   = tp->function;
            alertData.addr   = addr;
            alertData.size   = size;
            alertData.op     = op;

            Exec_ExtAlert(memAlerts[op], tp->caller, tp->stack, AT_MEMORY, &alertData, SysBase);
        }

        /*
         * Theoretically, during very early boot we can fail to post an alert
         * (no KernelBase yet). In this case we return with a fault indication.
         */
        return FALSE;
    }

    return TRUE;
}
/*
 * MemChunk consistency check. The rules are:
 *
 * 1. Both mc_Next and mc_Bytes must be MEMCHUNK_TOTAL-aligned, and mc_Bytes cannot be zero.
 * 2. The end of this chunk must not be above mh->mh_Upper.
 * 3. mc_Next (if present) must point between the end of this chunk and
 *    mh->mh_Upper - MEMCHUNK_TOTAL. There must be at least MEMCHUNK_TOTAL
 *    allocated bytes between two free chunks.
 *
 * This function is inlined for speed.
 */
static inline BOOL validateChunk(struct MemChunk *p2, struct MemChunk *p1, struct MemHeader *mh,
                                 UBYTE op, APTR addr, IPTR size,
                                 struct TraceLocation *tp, struct ExecBase *SysBase)
{
    if (((IPTR)p2->mc_Next & (MEMCHUNK_TOTAL-1)) || (p2->mc_Bytes == 0) || (p2->mc_Bytes & (MEMCHUNK_TOTAL-1)) ||   /* 1 */
        ((APTR)p2 + p2->mc_Bytes > mh->mh_Upper) ||                                                                 /* 2 */
        (p2->mc_Next && (((APTR)p2->mc_Next < (APTR)p2 + p2->mc_Bytes + MEMCHUNK_TOTAL) ||                          /* 3 */
                         ((APTR)p2->mc_Next > mh->mh_Upper - MEMCHUNK_TOTAL))))
    {
        if (tp)
        {
            struct MMContext alertData;

            alertData.mh     = mh;
            alertData.mc     = p2;
            alertData.mcPrev = (p1 == (struct MemChunk *)&mh->mh_First) ? NULL : p1;
            alertData.func   = tp->function;
            alertData.addr   = addr;
            alertData.size   = size;
            alertData.op     = op;

            Exec_ExtAlert(memAlerts[op], tp->caller, tp->stack, AT_MEMORY, &alertData, SysBase);
        }

        return FALSE;
    }

    return TRUE;
}

#endif
/*
 * Allocate a block from the given MemHeader in a specific way.
 * This routine can be called with SysBase = NULL.
 *
 * The mhac parameter is optional; allocation needs to work without it as
 * well. However, once it has been passed for a given MemHeader, it needs to
 * be passed in all subsequent calls for that MemHeader.
 */
APTR stdAlloc(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR size,
              ULONG requirements, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    /* First round the requested size up to a multiple of MEMCHUNK_TOTAL */
    IPTR byteSize = AROS_ROUNDUP2(size, MEMCHUNK_TOTAL);
    struct MemChunk *mc = NULL, *p1, *p2;

    /* Validate the MemHeader before doing anything. */
    if (!validateHeader(mh, MM_ALLOC, NULL, size, tp, SysBase))
        return NULL;

    /*
     * The free memory list is only singly linked, i.e. to remove
     * elements from the list we need the node's predecessor. For the
     * first element we can use mh->mh_First instead of a real predecessor.
     */
    p1 = mhac_GetBetterPrevMemChunk((struct MemChunk *)&mh->mh_First, size, mhac);
    p2 = p1->mc_Next;

    /*
     * Follow the memory list. p1 is the previous MemChunk, p2 is the current one.
     * On the first pass p1 points to mh->mh_First, so that changing p1->mc_Next
     * actually changes mh->mh_First.
     */
    while (p2 != NULL)
    {
        /* Validate the current chunk */
        if (!validateChunk(p2, p1, mh, MM_ALLOC, NULL, size, tp, SysBase))
            return NULL;

        /* Check if the current block is large enough */
        if (p2->mc_Bytes >= byteSize)
        {
            /* It is. */
            mc = p1;

            /* Use this one if MEMF_REVERSE is not set. */
            if (!(requirements & MEMF_REVERSE))
                break;
            /* Else continue - there may be more to come. */
        }

        /* Go to the next block */
        p1 = p2;
        p2 = p1->mc_Next;
    }

    /* Something found? */
    if (mc != NULL)
    {
        /* Remember: if MEMF_REVERSE is set, p1 and p2 are now invalid. */
        p1 = mc;
        p2 = p1->mc_Next;

        mhac_MemChunkClaimed(p2, mhac);

        /* Remove the block from the list and return it. */
        if (p2->mc_Bytes == byteSize)
        {
            /* Fits exactly. Just relink the list. */
            p1->mc_Next = p2->mc_Next;
            mc          = p2;
        }
        else
        {
            struct MemChunk *pp = p1;

            if (requirements & MEMF_REVERSE)
            {
                /* Return the last bytes. */
                p1->mc_Next = p2;
                mc = (struct MemChunk *)((UBYTE *)p2 + p2->mc_Bytes - byteSize);
            }
            else
            {
                /* Return the first bytes. */
                p1->mc_Next = (struct MemChunk *)((UBYTE *)p2 + byteSize);
                mc = p2;
            }

            p1           = p1->mc_Next;
            p1->mc_Next  = p2->mc_Next;
            p1->mc_Bytes = p2->mc_Bytes - byteSize;

            mhac_MemChunkCreated(p1, pp, mhac);
        }

        mh->mh_Free -= byteSize;

        /* Clear the block if requested */
        if (requirements & MEMF_CLEAR)
            memset(mc, 0, byteSize);
    }

    return mc;
}
/*
 * Free 'size' bytes starting at 'addr', belonging to MemHeader 'freeList'.
 * For the mhac parameter see stdAlloc().
 */
void stdDealloc(struct MemHeader *freeList, struct MemHeaderAllocatorCtx *mhac, APTR addr, IPTR size, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    APTR memoryBlock;
    IPTR byteSize;
    struct MemChunk *p1, *p2, *p3;
    UBYTE *p4;

    /* Make sure the MemHeader is OK */
    if (!validateHeader(freeList, MM_FREE, addr, size, tp, SysBase))
        return;

    /* Align the size to the requirements */
    byteSize = size + ((IPTR)addr & (MEMCHUNK_TOTAL - 1));
    byteSize = (byteSize + MEMCHUNK_TOTAL - 1) & ~(MEMCHUNK_TOTAL - 1);

    /* Align the block as well */
    memoryBlock = (APTR)((IPTR)addr & ~(MEMCHUNK_TOTAL - 1));
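
    /*
     * Example (illustrative; MEMCHUNK_TOTAL is architecture-dependent): with
     * MEMCHUNK_TOTAL = 16, addr = 0x1008 and size = 20, byteSize becomes
     * 20 + 8 = 28, rounded up to 32, while memoryBlock becomes 0x1000.
     */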
    /*
        The free memory list is only singly linked, i.e. to insert
        elements into the list we need the node as well as its
        predecessor. For the first element we can use freeList->mh_First
        instead of a real predecessor.
    */
    p1 = (struct MemChunk *)&freeList->mh_First;
    p2 = freeList->mh_First;

    /* Start and end(+1) of the block */
    p3 = (struct MemChunk *)memoryBlock;
    p4 = (UBYTE *)p3 + byteSize;

    /* No chunk in the list? Just insert the current one and return. */
    if (p2 == NULL)
    {
        p3->mc_Bytes = byteSize;
        p3->mc_Next  = NULL;
        p1->mc_Next  = p3;
        freeList->mh_Free += byteSize;
        return;
    }

    /* Follow the list to find a place where to insert our memory. */
    do
    {
        if (!validateChunk(p2, p1, freeList, MM_FREE, addr, size, tp, SysBase))
            return;

        /* Found a block with a higher address? */
        if (p2 >= p3)
        {
#if !defined(NO_CONSISTENCY_CHECKS)
            /*
                If the memory to be freed overlaps with the current
                block, something must be wrong.
            */
            if (p4 > (UBYTE *)p2)
            {
                bug("[MM] Chunk allocator error\n");
                bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
                bug("[MM] Block overlaps (1) with chunk 0x%p (%u bytes)\n", p2, p2->mc_Bytes);

                Alert(AN_FreeTwice);
                return;
            }
#endif
            /* End the loop with p2 non-zero */
            break;
        }

        /* Go to the next block */
        p1 = p2;
        p2 = p2->mc_Next;

        /* If the loop ends with p2 zero, add the block at the end. */
    } while (p2 != NULL);

    /* If there was a previous block, merge with it. */
    if (p1 != (struct MemChunk *)&freeList->mh_First)
    {
#if !defined(NO_CONSISTENCY_CHECKS)
        /* Check if they overlap. */
        if ((UBYTE *)p1 + p1->mc_Bytes > (UBYTE *)p3)
        {
            bug("[MM] Chunk allocator error\n");
            bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
            bug("[MM] Block overlaps (2) with chunk 0x%p (%u bytes)\n", p1, p1->mc_Bytes);

            Alert(AN_FreeTwice);
            return;
        }
#endif
        /* Merge if possible */
        if ((UBYTE *)p1 + p1->mc_Bytes == (UBYTE *)p3)
        {
            mhac_MemChunkClaimed(p1, mhac);
            p3 = p1;
        }
        else
            /* Not possible to merge */
            p1->mc_Next = p3;
    }
    else
        /*
            There was no previous block. Just insert the memory at
            the start of the list.
        */
        p1->mc_Next = p3;

    /* Try to merge with the next block (if there is one ;-) ). */
    if (p4 == (UBYTE *)p2 && p2 != NULL)
    {
        /*
            Overlap checking has already been done above. Doing it here,
            after the list has potentially changed, would be a bad idea.
        */
        mhac_MemChunkClaimed(p2, mhac);
        p4 += p2->mc_Bytes;
        p2  = p2->mc_Next;
    }

    /* Relink the list and return. */
    p3->mc_Next  = p2;
    p3->mc_Bytes = p4 - (UBYTE *)p3;
    // FIXME
    // memChunkCreated(p3, mhi);
    freeList->mh_Free += byteSize;
}
/*
 * TODO:
 * During the transition period the four routines below use the nommu
 * allocator. When the transition is complete they should use it only if the
 * MMU is inactive; otherwise they should use KrnAllocPages()/KrnFreePages().
 */
/* Non-mungwalled AllocAbs(). Does not destroy sideways regions. */
APTR InternalAllocAbs(APTR location, IPTR byteSize, struct ExecBase *SysBase)
{
    return nommu_AllocAbs(location, byteSize, SysBase);
}
/*
 * Use this to free a region allocated with InternalAllocAbs().
 * Otherwise you will hit the mungwall problem (FreeMem() expects a header).
 */
void InternalFreeMem(APTR location, IPTR byteSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    nommu_FreeMem(location, byteSize, loc, SysBase);
}
/*
 * Allocate a region managed by its own MemHeader. The usable size is reduced
 * by the size of the header.
 */
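/*
 * For example (illustrative; MEMHEADER_TOTAL is architecture-dependent), a
 * request for 65536 bytes yields 65536 - MEMHEADER_TOTAL usable bytes,
 * starting at mh_Lower = (APTR)mh + MEMHEADER_TOTAL.
 */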
APTR AllocMemHeader(IPTR size, ULONG flags, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeader *mh;

    mh = nommu_AllocMem(size, flags, loc, SysBase);
    DMH(bug("[AllocMemHeader] Allocated %u bytes at 0x%p\n", size, mh));

    if (mh)
    {
        struct MemHeader *orig = FindMem(mh, SysBase);

        size -= MEMHEADER_TOTAL;

        /*
         * Initialize the new MemHeader.
         * Inherit attributes from the system MemHeader from which
         * our chunk was allocated.
         */
        mh->mh_Node.ln_Type = NT_MEMORY;
        mh->mh_Node.ln_Pri  = orig->mh_Node.ln_Pri;
        mh->mh_Attributes   = orig->mh_Attributes;
        mh->mh_Lower        = (APTR)mh + MEMHEADER_TOTAL;
        mh->mh_Upper        = mh->mh_Lower + size;
        mh->mh_First        = mh->mh_Lower;
        mh->mh_Free         = size;

        /* Create the first (and only) MemChunk */
        mh->mh_First->mc_Next  = NULL;
        mh->mh_First->mc_Bytes = size;
    }
    return mh;
}
/* Free a region allocated by AllocMemHeader() */
void FreeMemHeader(APTR addr, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    ULONG size = ((struct MemHeader *)addr)->mh_Upper - addr;

    DMH(bug("[FreeMemHeader] Freeing %u bytes at 0x%p\n", size, addr));
    nommu_FreeMem(addr, size, loc, SysBase);
}
/*
 * This is our own version of Enqueue(). Currently the only difference is that
 * we insert our node before the first node with LOWER OR EQUAL priority,
 * so that nodes of equal priority form a LIFO queue rather than a FIFO one.
 * This speeds up the allocator.
 * TODO: implement secondary sorting by mh_Free. This will make it possible to
 * implement a best-match algorithm (so that puddles with less free space are
 * picked first). This way the smallest allocations will reuse the smallest
 * chunks instead of fragmenting large ones.
 */
static void EnqueueMemHeader(struct MinList *list, struct MemHeader *mh)
{
    struct MemHeader *next;

    /* Look through the list */
    ForeachNode (list, next)
    {
        /*
            Look for the first MemHeader with a lower or equal priority than
            the node we have to insert into the list.
        */
        if (mh->mh_Node.ln_Pri >= next->mh_Node.ln_Pri)
            break;
    }

    /* Insert the node before next */
    mh->mh_Node.ln_Pred = next->mh_Node.ln_Pred;
    mh->mh_Node.ln_Succ = &next->mh_Node;
    next->mh_Node.ln_Pred->ln_Succ = &mh->mh_Node;
    next->mh_Node.ln_Pred = &mh->mh_Node;
}
/*
 * Allocate memory with the given physical properties from the given pool.
 * Our pools can be mixed. This means that different puddles from the
 * pool can have different physical flags. For example the same pool
 * can contain puddles from both CHIP and FAST memory. This is done in
 * order to provide a single system default pool for all types of memory.
 */
APTR InternalAllocPooled(APTR poolHeader, IPTR memSize, ULONG flags, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct ProtectedPool *pool = poolHeader + MEMHEADER_TOTAL;
    APTR ret = NULL;
    IPTR origSize;
    struct MemHeader *mh;

    D(bug("[exec] InternalAllocPooled(0x%p, %u, 0x%08X), header 0x%p\n", poolHeader, memSize, flags, pool));
    /*
     * Memory blocks allocated from the pool store pointers to the MemHeader they were
     * allocated from. This is done in order to avoid slow lookups in InternalFreePooled().
     * This is done in an AllocVec()-alike manner: the pointer is placed right before the block.
     */
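    /*
     * Resulting block layout (mungwall omitted):
     *
     *   struct MemHeader *   user data
     *   +------------------+----------------- ...
     *   ^                  ^
     *   allocation start   pointer returned to the caller
     */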
    memSize += sizeof(struct MemHeader *);
    origSize = memSize;

    /* If mungwall is enabled, also count the size of the walls */
    if (PrivExecBase(SysBase)->IntFlags & EXECF_MungWall)
        memSize += MUNGWALL_TOTAL_SIZE;

    if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
    {
        ObtainSemaphore(&pool->sem);
    }

    /* Follow the list of MemHeaders */
    mh = (struct MemHeader *)pool->pool.PuddleList.mlh_Head;
    for (;;)
    {
        ULONG physFlags = flags & MEMF_PHYSICAL_MASK;

        /* Are there no more MemHeaders? */
        if (mh->mh_Node.ln_Succ == NULL)
        {
            /*
             * Get a new one.
             * Usually we allocate puddles of the default size specified at
             * pool creation. However we can be asked to allocate a block
             * larger than the default puddle size.
             * Previously this was handled by the threshSize parameter. In our
             * new implementation we just allocate an enlarged puddle. This is
             * done in order not to waste the page tails beyond the allocated
             * large block. These tails will be used for our pool too. They are
             * smaller than a page but still fit small allocations perfectly
             * (the primary use for pools).
             * Since our large block is also a puddle, it will be reused for
             * our pool when the block is freed. It can also be reused for
             * another large allocation, if it fits.
             * Our final puddle size still includes MEMHEADER_TOTAL in any case.
             */
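            /*
             * Worked example (illustrative; PageSize is system-dependent):
             * with a 4096-byte page, a 10000-byte request plus MEMHEADER_TOTAL
             * is rounded up to the next page boundary, giving a 12288-byte
             * puddle whose tail stays usable by the pool.
             */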
            IPTR puddleSize = pool->pool.PuddleSize;

            if (memSize > puddleSize - MEMHEADER_TOTAL)
            {
                IPTR align = PrivExecBase(SysBase)->PageSize - 1;

                puddleSize = memSize + MEMHEADER_TOTAL;
                /* Round the size up to a page boundary */
                puddleSize = (puddleSize + align) & ~align;
            }

            mh = AllocMemHeader(puddleSize, flags, loc, SysBase);
            D(bug("[InternalAllocPooled] Allocated new puddle 0x%p, size %u\n", mh, puddleSize));

            /* No memory left? */
            if (mh == NULL)
                break;

            /* Add the new puddle to our pool */
            mh->mh_Node.ln_Name = (STRPTR)pool;
            Enqueue((struct List *)&pool->pool.PuddleList, &mh->mh_Node);

            /* Fall through to get the memory */
        }
        else
        {
            /* Skip existing MemHeaders whose memory type differs from the requested one */
            if (physFlags & ~mh->mh_Attributes)
            {
                D(bug("[InternalAllocPooled] Wrong flags for puddle 0x%p (wanted 0x%08X, have 0x%08X)\n", mh, flags, mh->mh_Attributes));

                mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
                continue;
            }
        }
        /* Try to get the memory */
        ret = stdAlloc(mh, NULL, memSize, flags, loc, SysBase);
        D(bug("[InternalAllocPooled] Allocated memory at 0x%p from puddle 0x%p\n", ret, mh));

        /* Got it? */
        if (ret != NULL)
        {
            /*
             * If this is not the first MemHeader and it has some free space,
             * move it forward (so that the next allocation will attempt to use it first).
             * IMPORTANT: We use a modified Enqueue() because we still sort MemHeaders
             * according to their priority (which they inherit from the system MemHeaders).
             * This allows us to have mixed pools (e.g. with both CHIP and FAST regions),
             * which will be needed in the future for memory protection.
             */
            if (mh->mh_Node.ln_Pred != NULL && mh->mh_Free > 32)
            {
                D(bug("[InternalAllocPooled] Re-sorting puddle list\n"));
                Remove(&mh->mh_Node);
                EnqueueMemHeader(&pool->pool.PuddleList, mh);
            }

            break;
        }
        /* No. Try the next MemHeader */
        mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
    }

    if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
    {
        ReleaseSemaphore(&pool->sem);
    }

    if (ret)
    {
        /* Build munge walls if requested */
        ret = MungWall_Build(ret, pool, origSize, flags, loc, SysBase);

        /* Remember where we were allocated from */
        *((struct MemHeader **)ret) = mh;
        ret += sizeof(struct MemHeader *);
    }

    /* Everything fine */
    return ret;
}
/*
 * This is the counterpart to InternalAllocPooled().
 * This code is separated from FreePooled() in order to provide compatibility
 * with various memory tracking patches. If some exec code calls
 * InternalAllocPooled() directly (AllocMem() will do that), it also has to
 * call InternalFreePooled() directly.
 * Our chunks remember which pool they came from, so we don't need a pointer
 * to the pool header here. This will save us from headaches in a future
 * FreeMem() implementation.
 */
void InternalFreePooled(APTR memory, IPTR memSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeader *mh;
    APTR freeStart;
    IPTR freeSize;

    D(bug("[exec] InternalFreePooled(0x%p, %u)\n", memory, memSize));

    if (!memory || !memSize) return;

    /* Get the MemHeader pointer. It is stored right before our block. */
    freeStart = memory - sizeof(struct MemHeader *);
    freeSize  = memSize + sizeof(struct MemHeader *);
    mh        = *((struct MemHeader **)freeStart);

    /* Check the walls first */
    freeStart = MungWall_Check(freeStart, freeSize, loc, SysBase);
    if (PrivExecBase(SysBase)->IntFlags & EXECF_MungWall)
        freeSize += MUNGWALL_TOTAL_SIZE;
    /* Verify that the MemHeader pointer is correct */
    if ((mh->mh_Node.ln_Type != NT_MEMORY) ||
        (freeStart < mh->mh_Lower) || (freeStart + freeSize > mh->mh_Upper))
    {
        /*
         * Something is wrong.
         * TODO: the following should actually be printed as part of the alert.
         * In the future there should be some kind of "alert context". CPU alerts
         * (like illegal access) should remember the CPU context there. Memory
         * manager alerts (like this one) should remember some information of
         * their own.
         */
        bug("[MM] Pool manager error\n");
        bug("[MM] Attempt to free %u bytes at 0x%p\n", memSize, memory);
        bug("[MM] The chunk does not belong to a pool\n");

        Alert(AN_BadFreeAddr);
    }
    else
    {
        struct ProtectedPool *pool = (struct ProtectedPool *)mh->mh_Node.ln_Name;
        IPTR size;

        if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
        {
            ObtainSemaphore(&pool->sem);
        }

        size = mh->mh_Upper - mh->mh_Lower;
        D(bug("[FreePooled] Allocated from puddle 0x%p, size %u\n", mh, size));

        /* Free the memory. */
        stdDealloc(mh, NULL, freeStart, freeSize, loc, SysBase);
        D(bug("[FreePooled] Deallocated chunk, %u free bytes in the puddle\n", mh->mh_Free));

        /* Is this MemHeader completely free now? */
        if (mh->mh_Free == size)
        {
            D(bug("[FreePooled] Puddle is empty, giving it back to the system\n"));

            /* Yes. Remove it from the list. */
            Remove(&mh->mh_Node);
            /* And free it. */
            FreeMemHeader(mh, loc, SysBase);
        }
        /* All done. */

        if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
        {
            ReleaseSemaphore(&pool->sem);
        }
    }
}
ULONG checkMemHandlers(struct checkMemHandlersState *cmhs, struct ExecBase *SysBase)
{
    struct Node *tmp;
    struct Interrupt *lmh;

    if (cmhs->cmhs_Data.memh_RequestFlags & MEMF_NO_EXPUNGE)
        return MEM_DID_NOTHING;

    /* In order to keep things clean, we must run in a single thread */
    ObtainSemaphore(&PrivExecBase(SysBase)->LowMemSem);

    /*
     * Loop over the low memory handlers. Handlers can remove
     * themselves from the list while being invoked, so
     * we need to be careful!
     */
    for (lmh = (struct Interrupt *)cmhs->cmhs_CurNode;
         (tmp = lmh->is_Node.ln_Succ);
         lmh = (struct Interrupt *)(cmhs->cmhs_CurNode = tmp))
    {
        ULONG ret;

        ret = AROS_UFC3 (LONG, lmh->is_Code,
                  AROS_UFCA(struct MemHandlerData *, &cmhs->cmhs_Data, A0),
                  AROS_UFCA(APTR,                     lmh->is_Data,    A1),
                  AROS_UFCA(struct ExecBase *,        SysBase,         A6)
              );

        if (ret == MEM_TRY_AGAIN)
        {
            /* The handler said it did something. Try again. */
            /* Is there any program that depends on this flag??? */
            cmhs->cmhs_Data.memh_Flags |= MEMHF_RECYCLE;

            ReleaseSemaphore(&PrivExecBase(SysBase)->LowMemSem);
            return MEM_TRY_AGAIN;
        }
        else
        {
            /* Nothing more to expect from this handler. */
            cmhs->cmhs_Data.memh_Flags &= ~MEMHF_RECYCLE;
        }
    }

    ReleaseSemaphore(&PrivExecBase(SysBase)->LowMemSem);
    return MEM_DID_NOTHING;
}