rom/exec/memory.c
/*
    Copyright © 1995-2012, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <aros/debug.h>
#include <exec/rawfmt.h>
#include <exec/memheaderext.h>
#include <proto/kernel.h>

#include "exec_intern.h"
#include "exec_util.h"
#include "etask.h"
#include "memory.h"
#include "mungwall.h"

#define DMH(x)
/*
 * Find MemHeader to which address belongs.
 * This function is legal to be called in supervisor mode (we use TypeOfMem()
 * in order to validate addresses in tons of places). So, here are checks.
 */
struct MemHeader *FindMem(APTR address, struct ExecBase *SysBase)
{
    int usermode = (KernelBase != NULL) && (KrnIsSuper() == 0);
    struct MemHeader *mh;

    /* Nobody should change the memory list now. */
    if (usermode) MEM_LOCK_SHARED;

    /* Follow the list of MemHeaders */
    mh = (struct MemHeader *)SysBase->MemList.lh_Head;

    while (mh->mh_Node.ln_Succ != NULL)
    {
        /* Check if this MemHeader fits */
        if (address >= mh->mh_Lower && address < mh->mh_Upper)
        {
            /* Yes. Return it. */
            if (usermode) MEM_UNLOCK;
            return mh;
        }

        /* Go to next MemHeader */
        mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
    }

    if (usermode) MEM_UNLOCK;
    return NULL;
}
char *FormatMMContext(char *buffer, struct MMContext *ctx, struct ExecBase *SysBase)
{
    if (ctx->addr)
        buffer = NewRawDoFmt("In %s, block at 0x%p, size %lu", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->func, ctx->addr, ctx->size) - 1;
    else
        buffer = NewRawDoFmt("In %s, size %lu", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->func, ctx->size) - 1;

    if (ctx->mc)
    {
        buffer = NewRawDoFmt("\nCorrupted MemChunk 0x%p (next 0x%p, size %lu)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mc, ctx->mc->mc_Next, ctx->mc->mc_Bytes) - 1;

        if (ctx->mcPrev)
            buffer = NewRawDoFmt("\nPrevious MemChunk 0x%p (next 0x%p, size %lu)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mcPrev, ctx->mcPrev->mc_Next, ctx->mcPrev->mc_Bytes) - 1;
    }

    /* Print MemHeader details */
    buffer = NewRawDoFmt("\nMemHeader 0x%p (0x%p - 0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh, ctx->mh->mh_Lower, ctx->mh->mh_Upper) - 1;
    if ((IPTR)ctx->mh->mh_First & (MEMCHUNK_TOTAL - 1))
        buffer = NewRawDoFmt("\n- Unaligned first chunk address (0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_First) - 1;

    if (ctx->mh->mh_Free & (MEMCHUNK_TOTAL - 1))
        buffer = NewRawDoFmt("\n- Unaligned free space count (0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_Free) - 1;

    if (ctx->mh->mh_First)
    {
        if ((APTR)ctx->mh->mh_First < ctx->mh->mh_Lower)
            buffer = NewRawDoFmt("\n- First chunk (0x%p) below lower address", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_First) - 1;

        if ((APTR)ctx->mh->mh_First + ctx->mh->mh_Free > ctx->mh->mh_Upper)
            buffer = NewRawDoFmt("\n- Free space count too large (%lu, first chunk 0x%xp)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_Free, ctx->mh->mh_First) - 1;
    }

    return buffer;
}
/* #define NO_ALLOCATOR_CONTEXT */

#ifdef NO_ALLOCATOR_CONTEXT

struct MemHeaderAllocatorCtx *mhac_GetSysCtx(struct MemHeader *mh, struct ExecBase *SysBase)
{
    return NULL;
}

void mhac_PoolMemHeaderSetup(struct MemHeader *mh, struct ProtectedPool *pool)
{
    mh->mh_Node.ln_Name = (STRPTR)pool;
}

void mhac_MemChunkClaimed(struct MemChunk *mc, struct MemHeaderAllocatorCtx *mhac)
{
}

ULONG mhac_GetCtxSize()
{
    return 0;
}

#define mhac_IsIndexEmpty(a) (TRUE)
#define mhac_ClearIndex(a)
#define mhac_MemChunkCreated(a, b, c) { (void)b; }
#define mhac_GetBetterPrevMemChunk(a, b, c) (a)
#define mhac_GetCloserPrevMemChunk(a, b, c) (a)
#define mhac_PoolMemHeaderGetCtx(a) (NULL)
#define mhac_PoolMemHeaderGetPool(a) (a->mh_Node.ln_Name)

#else
/* Allocator optimization support */

/*
 * The array contains pointers to the chunk previous to the first chunk of at least size N,
 * where N = 1 << (FIRSTPOTBIT + (i * POTSTEP)) and i is the index into the array.
 * "First" is defined as the MemChunk with the lowest address.
 *
 * Each chunk in the array locates the place where the search should start, not necessarily
 * where the allocation will happen.
 *
 * If a chunk is taken from the MemHeader and is present in the index, it must be removed
 * from the index.
 *
 * If a chunk is returned to the MemHeader, it may be registered with the index.
 */

#define FIRSTPOTBIT (5)
#define FIRSTPOT (1 << FIRSTPOTBIT)
#define POTSTEP (1) /* Distance between each level */
#define ALLOCATORCTXINDEXSIZE (10) /* Number of levels in index */
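
/*
 * For illustration: with FIRSTPOTBIT = 5 and POTSTEP = 1 as defined above,
 * the ten index levels (i = 0..9) cover chunks of at least
 * 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192 and 16384 bytes.
 * E.g. slot 2 caches a pointer to the chunk preceding the lowest-addressed
 * free chunk of 128 bytes or more.
 */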
struct MemHeaderAllocatorCtx
{
    struct Node         mhac_Node;
    struct MemHeader    *mhac_MemHeader;
    APTR                mhac_Data1;

    ULONG               mhac_IndexSize;
    struct MemChunk     *mhac_PrevChunks[ALLOCATORCTXINDEXSIZE];
};

ULONG mhac_GetCtxSize()
{
    return (AROS_ROUNDUP2(sizeof(struct MemHeaderAllocatorCtx), MEMCHUNK_TOTAL));
}
static BOOL mhac_IsIndexEmpty(struct MemHeaderAllocatorCtx *mhac)
{
    LONG i;
    if (!mhac)
        return TRUE;

    for (i = 0; i < mhac->mhac_IndexSize; i++)
        if (mhac->mhac_PrevChunks[i] != NULL)
            return FALSE;

    return TRUE;
}

static void mhac_ClearIndex(struct MemHeaderAllocatorCtx *mhac)
{
    LONG i;

    if (!mhac)
        return;

    for (i = 0; i < ALLOCATORCTXINDEXSIZE; i++)
        mhac->mhac_PrevChunks[i] = NULL;
}
static void mhac_SetupMemHeaderAllocatorCtx(struct MemHeader *mh, ULONG maxindexsize,
        struct MemHeaderAllocatorCtx *mhac)
{
    /* Adjust index size to space in MemHeader */
    IPTR size = (IPTR)mh->mh_Upper - (IPTR)mh->mh_Lower;
    LONG indexsize = 0;

    size = size >> FIRSTPOTBIT;
    size = size >> POTSTEP;

    for (; size > 0; size = size >> POTSTEP) indexsize++;

    if (indexsize < 0) indexsize = 0;
    if (indexsize > maxindexsize) indexsize = maxindexsize;
    if (indexsize > ALLOCATORCTXINDEXSIZE) indexsize = ALLOCATORCTXINDEXSIZE;

    mhac->mhac_MemHeader = mh;
    mhac->mhac_IndexSize = indexsize;
    mhac_ClearIndex(mhac);
}
struct MemHeaderAllocatorCtx *mhac_GetSysCtx(struct MemHeader *mh, struct ExecBase *SysBase)
{
    struct MemHeaderAllocatorCtx *mhac = NULL;

    ForeachNode(&PrivExecBase(SysBase)->AllocatorCtxList, mhac)
    {
        if (mhac->mhac_MemHeader == mh)
            return mhac;
    }

    /* New context is needed */
    mhac = Allocate(mh, sizeof(struct MemHeaderAllocatorCtx));
    mhac_SetupMemHeaderAllocatorCtx(mh, ALLOCATORCTXINDEXSIZE, mhac);
    AddTail(&PrivExecBase(SysBase)->AllocatorCtxList, (struct Node *)mhac);

    return mhac;
}
void mhac_MemChunkClaimed(struct MemChunk *mc, struct MemHeaderAllocatorCtx *mhac)
{
    LONG i;

    if (!mhac)
        return;

    for (i = 0; i < mhac->mhac_IndexSize; i++)
    {
        if (mhac->mhac_PrevChunks[i] != NULL &&
            (mhac->mhac_PrevChunks[i] == mc || mhac->mhac_PrevChunks[i]->mc_Next == mc))
        {
            mhac->mhac_PrevChunks[i] = NULL;
        }
    }
}
static LONG mhac_CalcIndex(LONG size, ULONG indexsize)
{
    LONG r = 0;
    size >>= FIRSTPOTBIT;
    while (size >>= 1)
        r++;

    if (r > indexsize - 1) r = indexsize - 1;

    return r;
}
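
/*
 * For illustration, a worked example of the computation above: for size = 100
 * and indexsize = 10, 100 >> FIRSTPOTBIT = 3, which needs one further shift
 * to reach zero, so the function returns index 1 (the "at least 64 bytes"
 * level). The result is clamped to indexsize - 1 for very large requests.
 */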
static void mhac_MemChunkCreated(struct MemChunk *mc, struct MemChunk *mcprev, struct MemHeaderAllocatorCtx *mhac)
{
    LONG i, v = FIRSTPOT;

    if (mc->mc_Bytes < FIRSTPOT) /* Chunk too small for index */
        return;

    if (!mhac)
        return;

    for (i = 0; i < mhac->mhac_IndexSize; i++, v = v << POTSTEP)
    {
        if (mc->mc_Bytes < v)
            break; /* Chunk smaller than index level i. Stop */

        /* If there is no chunk in the index, or the passed chunk has a lower address than the chunk in the index */
        if (mhac->mhac_PrevChunks[i] == NULL ||
            (mhac->mhac_PrevChunks[i] != NULL && mhac->mhac_PrevChunks[i]->mc_Next > mc))
        {
            mhac->mhac_PrevChunks[i] = mcprev;
        }
    }
}
/*
 * General idea:
 * The function returns a pointer to the chunk previous to a chunk that allows a chunk
 * big enough for the allocation to be located faster. The function never returns NULL.
 *
 * Current implementation:
 * The function returns a pointer to the chunk previous to the first chunk of the largest
 * indexed size class that is not bigger than the requested size.
 */
static struct MemChunk *mhac_GetBetterPrevMemChunk(struct MemChunk *prev, IPTR size, struct MemHeaderAllocatorCtx *mhac)
{
    struct MemChunk *_return = prev;

    if (size < FIRSTPOT)
        return _return; /* Allocation too small for index */

    if (mhac)
    {
        LONG i;
        LONG ii = mhac_CalcIndex(size, mhac->mhac_IndexSize);

        if (mhac->mhac_PrevChunks[ii] != NULL)
            _return = mhac->mhac_PrevChunks[ii];
        else
        {
            for (i = ii - 1; i >= 0; i--)
            {
                if (mhac->mhac_PrevChunks[i] != NULL)
                {
                    _return = mhac->mhac_PrevChunks[i];
                    break;
                }
            }
        }
    }

    return _return;
}
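
/*
 * For illustration: a request of 200 bytes maps to index 2 (chunks of at
 * least 128 bytes). If slot 2 is empty, the loop above falls back to slot 1,
 * then slot 0; if the whole index is empty, the artificial chunk at
 * &mh->mh_First passed in by the caller is returned unchanged. A cached
 * chunk may still be too small for the request, which is why the result is
 * only a starting point for the search, not the allocation target.
 */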
static struct MemChunk *mhac_GetCloserPrevMemChunk(struct MemChunk *prev, APTR addr, struct MemHeaderAllocatorCtx *mhac)
{
    struct MemChunk *_return = prev;

    if (mhac)
    {
        LONG i;

        for (i = 0; i < mhac->mhac_IndexSize; i++)
        {
            if (mhac->mhac_PrevChunks[i] != NULL &&
                (APTR)mhac->mhac_PrevChunks[i]->mc_Next < addr &&
                mhac->mhac_PrevChunks[i]->mc_Next > _return->mc_Next)
            {
                _return = mhac->mhac_PrevChunks[i];
            }
        }
    }

    return _return;
}
/*
 * Enhance a MemHeader that is part of a pool with a MemHeaderAllocatorCtx
 */
void mhac_PoolMemHeaderSetup(struct MemHeader *mh, struct ProtectedPool *pool)
{
    struct MemHeaderAllocatorCtx *mhac = Allocate(mh, sizeof(struct MemHeaderAllocatorCtx));

    mhac_SetupMemHeaderAllocatorCtx(mh, 5, mhac);

    mhac->mhac_Data1 = pool;
    mh->mh_Node.ln_Name = (STRPTR)mhac;
}

#define mhac_PoolMemHeaderGetCtx(a) ((struct MemHeaderAllocatorCtx *)(a->mh_Node.ln_Name))
#define mhac_PoolMemHeaderGetPool(a) (mhac_PoolMemHeaderGetCtx(a)->mhac_Data1)

#endif
#ifdef NO_CONSISTENCY_CHECKS

#define validateHeader(mh, op, addr, size, SysBase) TRUE
#define validateChunk(mc, prev, mh, op, addr, size, SysBase) TRUE

#else

static ULONG memAlerts[] =
{
    AT_DeadEnd|AN_MemoryInsane, /* MM_ALLOC   */
    AT_DeadEnd|AN_MemCorrupt,   /* MM_FREE    */
    AN_FreeTwice                /* MM_OVERLAP */
};
/*
 * MemHeader validation routine. Rules are:
 *
 * 1. Both mh_First and mh_Free must be MEMCHUNK_TOTAL-aligned.
 * 2. Free space (if present) must completely fit in between mh_Lower and mh_Upper.
 * We intentionally don't check the header's own location. We assume that in future we'll
 * be able to put MEMF_CHIP headers inside MEMF_FAST memory, for speed up.
 */
static BOOL validateHeader(struct MemHeader *mh, UBYTE op, APTR addr, IPTR size, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    if (((IPTR)mh->mh_First & (MEMCHUNK_TOTAL - 1)) || (mh->mh_Free & (MEMCHUNK_TOTAL - 1)) ||          /* 1 */
        (mh->mh_First &&
         (((APTR)mh->mh_First < mh->mh_Lower) || ((APTR)mh->mh_First + mh->mh_Free > mh->mh_Upper))))   /* 2 */
    {
        if (tp)
        {
            /* TraceLocation is not supplied by PrepareExecBase(). Fail silently. */
            struct MMContext alertData;

            alertData.mh     = mh;
            alertData.mc     = NULL;
            alertData.mcPrev = NULL;
            alertData.func   = tp->function;
            alertData.addr   = addr;
            alertData.size   = size;
            alertData.op     = op;

            Exec_ExtAlert(memAlerts[op], tp->caller, tp->stack, AT_MEMORY, &alertData, SysBase);
        }

        /*
         * Theoretically during very early boot we can fail to post an alert (no KernelBase yet).
         * In this case we return with fault indication.
         */
        return FALSE;
    }
    return TRUE;
}
/*
 * MemChunk consistency check. Rules are:
 *
 * 1. Both mc_Next and mc_Bytes must be MEMCHUNK_TOTAL-aligned, and mc_Bytes can not be zero.
 * 2. End of this chunk must not be greater than mh->mh_Upper.
 * 3. mc_Next (if present) must point in between the end of this chunk and mh->mh_Upper - MEMCHUNK_TOTAL.
 *    There must be at least MEMCHUNK_TOTAL allocated bytes between free chunks.
 *
 * This function is inlined for speed improvements.
 */
static inline BOOL validateChunk(struct MemChunk *p2, struct MemChunk *p1, struct MemHeader *mh,
                                 UBYTE op, APTR addr, IPTR size,
                                 struct TraceLocation *tp, struct ExecBase *SysBase)
{
    if (((IPTR)p2->mc_Next & (MEMCHUNK_TOTAL-1)) || (p2->mc_Bytes == 0) || (p2->mc_Bytes & (MEMCHUNK_TOTAL-1)) ||   /* 1 */
        ((APTR)p2 + p2->mc_Bytes > mh->mh_Upper) ||                                                                 /* 2 */
        (p2->mc_Next && (((APTR)p2->mc_Next < (APTR)p2 + p2->mc_Bytes + MEMCHUNK_TOTAL) ||                          /* 3 */
                         ((APTR)p2->mc_Next > mh->mh_Upper - MEMCHUNK_TOTAL))))
    {
        if (tp)
        {
            struct MMContext alertData;

            alertData.mh     = mh;
            alertData.mc     = p2;
            alertData.mcPrev = (p1 == (struct MemChunk *)&mh->mh_First) ? NULL : p1;
            alertData.func   = tp->function;
            alertData.addr   = addr;
            alertData.size   = size;
            alertData.op     = op;

            Exec_ExtAlert(memAlerts[op], tp->caller, tp->stack, AT_MEMORY, &alertData, SysBase);
        }
        return FALSE;
    }

    return TRUE;
}

#endif
/*
 * Allocate a block from the given MemHeader in a specific way.
 * This routine can be called with SysBase = NULL.
 *
 * MemHeaderAllocatorCtx:
 * This parameter is optional; allocation needs to work without it as well.
 * However, if it was passed once for a given MemHeader it needs to be passed
 * in all consecutive calls.
 */
APTR stdAlloc(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR size,
              ULONG requirements, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    /*
     * The check has to be done for the second time. Exec uses stdAlloc on the MemHeader
     * passed upon startup. This is bad, very bad. So here is a temporary hack :)
     */
    if (mh->mh_Attributes & MEMF_MANAGED)
    {
        struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

        if (mhe->mhe_Alloc)
        {
            return mhe->mhe_Alloc(mhe, size, &requirements);
        }
        else
            return NULL;
    }
    else
    {
        /* First round byteSize up to a multiple of MEMCHUNK_TOTAL */
        IPTR byteSize = AROS_ROUNDUP2(size, MEMCHUNK_TOTAL);
        struct MemChunk *mc = NULL, *p1, *p2;

        /* Validate MemHeader before doing anything. */
        if (!validateHeader(mh, MM_ALLOC, NULL, size, tp, SysBase))
            return NULL;

        /* Check if there is even enough total free memory */
        if (mh->mh_Free < byteSize)
            return NULL;

        /*
         * The free memory list is only singly linked, i.e. to remove
         * elements from the list we need the node's predecessor. For the
         * first element we can use mh->mh_First instead of a real predecessor.
         */
        p1 = mhac_GetBetterPrevMemChunk((struct MemChunk *)&mh->mh_First, size, mhac);
        p2 = p1->mc_Next;

        /*
         * Follow the memory list. p1 is the previous MemChunk, p2 is the current one.
         * On the 1st pass p1 points to mh->mh_First, so that changing p1->mc_Next actually
         * changes mh->mh_First.
         */
        while (p2 != NULL)
        {
            /* Validate the current chunk */
            if (!validateChunk(p2, p1, mh, MM_ALLOC, NULL, size, tp, SysBase))
                return NULL;

            /* Check if the current block is large enough */
            if (p2->mc_Bytes >= byteSize)
            {
                /* It is. */
                mc = p1;

                /* Use this one if MEMF_REVERSE is not set. */
                if (!(requirements & MEMF_REVERSE))
                    break;
                /* Else continue - there may be more to come. */
            }

            /* Go to next block */
            p1 = p2;
            p2 = p1->mc_Next;
        }

        /* Something found? */
        if (mc != NULL)
        {
            /* Remember: if MEMF_REVERSE is set p1 and p2 are now invalid. */
            p1 = mc;
            p2 = p1->mc_Next;

            mhac_MemChunkClaimed(p2, mhac);

            /* Remove the block from the list and return it. */
            if (p2->mc_Bytes == byteSize)
            {
                /* Fits exactly. Just relink the list. */
                p1->mc_Next = p2->mc_Next;
                mc = p2;
            }
            else
            {
                struct MemChunk *pp = p1;

                if (requirements & MEMF_REVERSE)
                {
                    /* Return the last bytes. */
                    p1->mc_Next = p2;
                    mc = (struct MemChunk *)((UBYTE *)p2 + p2->mc_Bytes - byteSize);
                }
                else
                {
                    /* Return the first bytes. */
                    p1->mc_Next = (struct MemChunk *)((UBYTE *)p2 + byteSize);
                    mc = p2;
                }

                p1 = p1->mc_Next;
                p1->mc_Next = p2->mc_Next;
                p1->mc_Bytes = p2->mc_Bytes - byteSize;

                mhac_MemChunkCreated(p1, pp, mhac);
            }

            mh->mh_Free -= byteSize;

            /* Clear the block if requested */
            if (requirements & MEMF_CLEAR)
                memset(mc, 0, byteSize);
        }
        else
        {
            if (!mhac_IsIndexEmpty(mhac))
            {
                /*
                 * Since chunks created during deallocation are not returned to the index,
                 * retry with a cleared index.
                 */
                mhac_ClearIndex(mhac);
                mc = stdAlloc(mh, mhac, size, requirements, tp, SysBase);
            }
        }

        return mc;
    }
}
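
/*
 * For illustration of the splitting logic above: when a 64-byte request
 * (already MEMCHUNK_TOTAL-aligned) is satisfied from a 256-byte free chunk,
 * the forward case returns the chunk's start address and leaves a new
 * 192-byte free chunk right behind it; with MEMF_REVERSE the last 64 bytes
 * are returned and the 192-byte remainder keeps the original start address.
 * In both cases the remainder is re-linked into the free list and reported
 * to the allocator index via mhac_MemChunkCreated().
 */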
/*
 * Free 'byteSize' bytes starting at 'memoryBlock' belonging to MemHeader 'freeList'.
 *
 * MemHeaderAllocatorCtx:
 * See stdAlloc().
 */
void stdDealloc(struct MemHeader *freeList, struct MemHeaderAllocatorCtx *mhac, APTR addr, IPTR size, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    APTR memoryBlock;
    IPTR byteSize;
    struct MemChunk *p1, *p2, *p3;
    UBYTE *p4;

    if (freeList->mh_Attributes & MEMF_MANAGED)
    {
        struct MemHeaderExt *mhe = (struct MemHeaderExt *)freeList;

        if (mhe->mhe_Free)
            mhe->mhe_Free(mhe, addr, size);
    }
    else
    {
        /* Make sure the MemHeader is OK */
        if (!validateHeader(freeList, MM_FREE, addr, size, tp, SysBase))
            return;

        /* Align size to the requirements */
        byteSize = size + ((IPTR)addr & (MEMCHUNK_TOTAL - 1));
        byteSize = (byteSize + MEMCHUNK_TOTAL - 1) & ~(MEMCHUNK_TOTAL - 1);

        /* Align the block as well */
        memoryBlock = (APTR)((IPTR)addr & ~(MEMCHUNK_TOTAL - 1));

        /*
            The free memory list is only single linked, i.e. to insert
            elements into the list I need the node as well as its
            predecessor. For the first element I can use freeList->mh_First
            instead of a real predecessor.
        */
        p1 = (struct MemChunk *)&freeList->mh_First;
        p2 = freeList->mh_First;

        /* Start and end (+1) of the block */
        p3 = (struct MemChunk *)memoryBlock;
        p4 = (UBYTE *)p3 + byteSize;

        /* No chunk in list? Just insert the current one and return. */
        if (p2 == NULL)
        {
            p3->mc_Bytes = byteSize;
            p3->mc_Next = NULL;
            p1->mc_Next = p3;
            freeList->mh_Free += byteSize;
            return;
        }

        /* Find closer chunk */
        p1 = mhac_GetCloserPrevMemChunk(p1, addr, mhac);
        p2 = p1->mc_Next;

        /* Follow the list to find a place where to insert our memory. */
        do
        {
            if (!validateChunk(p2, p1, freeList, MM_FREE, addr, size, tp, SysBase))
                return;

            /* Found a block with a higher address? */
            if (p2 >= p3)
            {
#if !defined(NO_CONSISTENCY_CHECKS)
                /*
                    If the memory to be freed overlaps with the current
                    block something must be wrong.
                */
                if (p4 > (UBYTE *)p2)
                {
                    bug("[MM] Chunk allocator error\n");
                    bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
                    bug("[MM] Block overlaps (1) with chunk 0x%p (%u bytes)\n", p2, p2->mc_Bytes);

                    Alert(AN_FreeTwice);
                    return;
                }
#endif
                /* End the loop with p2 non-zero */
                break;
            }
            /* goto next block */
            p1 = p2;
            p2 = p2->mc_Next;

            /* If the loop ends with p2 zero add it at the end. */
        } while (p2 != NULL);

        /* If there was a previous block merge with it. */
        if (p1 != (struct MemChunk *)&freeList->mh_First)
        {
#if !defined(NO_CONSISTENCY_CHECKS)
            /* Check if they overlap. */
            if ((UBYTE *)p1 + p1->mc_Bytes > (UBYTE *)p3)
            {
                bug("[MM] Chunk allocator error\n");
                bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
                bug("[MM] Block overlaps (2) with chunk 0x%p (%u bytes)\n", p1, p1->mc_Bytes);

                Alert(AN_FreeTwice);
                return;
            }
#endif
            /* Merge if possible */
            if ((UBYTE *)p1 + p1->mc_Bytes == (UBYTE *)p3)
            {
                mhac_MemChunkClaimed(p1, mhac);
                p3 = p1;
                /*
                 * Note: this case does not lead to mhac_MemChunkCreated, because
                 * we don't have the chunk prev to p1
                 */
            }
            else
                /* Not possible to merge */
                p1->mc_Next = p3;
        }
        else
            /*
                There was no previous block. Just insert the memory at
                the start of the list.
            */
            p1->mc_Next = p3;

        /* Try to merge with next block (if there is one ;-) ). */
        if (p4 == (UBYTE *)p2 && p2 != NULL)
        {
            /*
               Overlap checking already done. Doing it here after
               the list potentially changed would be a bad idea.
            */
            mhac_MemChunkClaimed(p2, mhac);
            p4 += p2->mc_Bytes;
            p2 = p2->mc_Next;
        }
        /* relink the list and return. */
        p3->mc_Next = p2;
        p3->mc_Bytes = p4 - (UBYTE *)p3;
        freeList->mh_Free += byteSize;
        if (p1->mc_Next == p3) mhac_MemChunkCreated(p3, p1, mhac);
    }
}
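
/*
 * To summarize the merge cases handled above: the freed block may be glued
 * to the preceding free chunk (p1), to the following free chunk (p2), or to
 * both, collapsing into a single larger chunk; otherwise it is simply linked
 * in between them as a new chunk. In every case freeList->mh_Free grows by
 * the aligned byteSize, and, where applicable, the index is updated via
 * mhac_MemChunkClaimed()/mhac_MemChunkCreated().
 */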
/*
 * TODO:
 * During the transition period the four routines below use the nommu allocator.
 * When the transition is complete they should use it only if the MMU
 * is inactive. Otherwise they should use KrnAllocPages()/KrnFreePages().
 */

/* Non-mungwalled AllocAbs(). Does not destroy sideways regions. */
APTR InternalAllocAbs(APTR location, IPTR byteSize, struct ExecBase *SysBase)
{
    return nommu_AllocAbs(location, byteSize, SysBase);
}

/*
 * Use this if you want to free a region allocated by InternalAllocAbs().
 * Otherwise you hit the mungwall problem (FreeMem() expects a header).
 */
void InternalFreeMem(APTR location, IPTR byteSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    nommu_FreeMem(location, byteSize, loc, SysBase);
}
/*
 * Allocate a region managed by its own header. Usable size is reduced by the
 * size of the header.
 */
APTR AllocMemHeader(IPTR size, ULONG flags, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeader *mh;

    mh = nommu_AllocMem(size, flags, loc, SysBase);
    DMH(bug("[AllocMemHeader] Allocated %u bytes at 0x%p\n", size, mh));

    if (mh)
    {
        struct MemHeader *orig = FindMem(mh, SysBase);

        size -= MEMHEADER_TOTAL;

        /*
         * Initialize new MemHeader.
         * Inherit attributes from the system MemHeader from which
         * our chunk was allocated.
         */
        mh->mh_Node.ln_Type = NT_MEMORY;
        mh->mh_Node.ln_Pri  = orig->mh_Node.ln_Pri;
        mh->mh_Attributes   = orig->mh_Attributes & ~MEMF_MANAGED;
        mh->mh_Lower        = (APTR)mh + MEMHEADER_TOTAL;
        mh->mh_Upper        = mh->mh_Lower + size;
        mh->mh_First        = mh->mh_Lower;
        mh->mh_Free         = size;

        /* Create the first (and the only) MemChunk */
        mh->mh_First->mc_Next  = NULL;
        mh->mh_First->mc_Bytes = size;
    }
    return mh;
}
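
/*
 * Resulting layout of a region returned by AllocMemHeader(), for a request
 * of 'size' bytes (illustrative):
 *
 *   mh                    mh + MEMHEADER_TOTAL (= mh_Lower)        mh_Upper
 *   +----------------------+------------------------------------------+
 *   | struct MemHeader     | usable memory (size - MEMHEADER_TOTAL)   |
 *   +----------------------+------------------------------------------+
 *
 * The header consumes MEMHEADER_TOTAL bytes at the start of the region and
 * the single initial MemChunk spans the remainder.
 */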
/* Free a region allocated by AllocMemHeader() */
void FreeMemHeader(APTR addr, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    ULONG size = ((struct MemHeader *)addr)->mh_Upper - addr;

    DMH(bug("[FreeMemHeader] Freeing %u bytes at 0x%p\n", size, addr));
    nommu_FreeMem(addr, size, loc, SysBase);
}
/*
 * This is our own Enqueue() version. Currently the only difference is that
 * we insert our node before the first node with LOWER OR EQUAL priority,
 * so that for nodes with equal priority it will be a LIFO, not a FIFO, queue.
 * This speeds up the allocator.
 * TODO: implement secondary sorting by mh_Free. This will allow us to
 * implement a best-match algorithm (so that puddles with smaller free space
 * will be picked up first). This way the smallest allocations will reuse the
 * smallest chunks instead of fragmenting large ones.
 */
static void EnqueueMemHeader(struct MinList *list, struct MemHeader *mh)
{
    struct MemHeader *next;

    /* Look through the list */
    ForeachNode (list, next)
    {
        /*
            Look for the first MemHeader with a lower or equal pri as the node
            we have to insert into the list.
        */
        if (mh->mh_Node.ln_Pri >= next->mh_Node.ln_Pri)
            break;
    }

    /* Insert the node before next */
    mh->mh_Node.ln_Pred = next->mh_Node.ln_Pred;
    mh->mh_Node.ln_Succ = &next->mh_Node;
    next->mh_Node.ln_Pred->ln_Succ = &mh->mh_Node;
    next->mh_Node.ln_Pred = &mh->mh_Node;
}
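
/*
 * For example: if the puddle list currently holds headers with priorities
 * 10, 10 and 0, inserting another priority-10 header with EnqueueMemHeader()
 * places it in front of both existing priority-10 nodes (LIFO among equals),
 * whereas the standard Enqueue() would place it after them.
 */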
/*
 * Allocate memory with given physical properties from the given pool.
 * Our pools can be mixed. This means that different puddles from the
 * pool can have different physical flags. For example the same pool
 * can contain puddles from both CHIP and FAST memory. This is done in
 * order to provide a single system default pool for all types of memory.
 */
APTR InternalAllocPooled(APTR poolHeader, IPTR memSize, ULONG flags, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct ProtectedPool *pool = poolHeader + MEMHEADER_TOTAL;
    APTR ret = NULL;
    IPTR origSize;
    struct MemHeader *mh;

    D(bug("[exec] InternalAllocPooled(0x%p, %u, 0x%08X), header 0x%p\n", poolHeader, memSize, flags, pool));

    /*
     * Memory blocks allocated from the pool store pointers to the MemHeader they were
     * allocated from. This is done in order to avoid slow lookups in InternalFreePooled().
     * This is done in an AllocVec()-alike manner; the pointer is placed right before the block.
     */
    memSize += sizeof(struct MemHeader *);
    origSize = memSize;

    /* If mungwall is enabled, also count the size of the walls */
    if (PrivExecBase(SysBase)->IntFlags & EXECF_MungWall)
        memSize += MUNGWALL_TOTAL_SIZE;

    if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
    {
        ObtainSemaphore(&pool->sem);
    }

    /* Follow the list of MemHeaders */
    mh = (struct MemHeader *)pool->pool.PuddleList.mlh_Head;
    for(;;)
    {
        ULONG physFlags = flags & MEMF_PHYSICAL_MASK;

        /* Are there no more MemHeaders? */
        if (mh->mh_Node.ln_Succ == NULL)
        {
            /*
             * Get a new one.
             * Usually we allocate puddles of default size, specified during
             * pool creation. However we can be asked to allocate a block whose
             * size will be larger than the default puddle size.
             * Previously this was handled by the threshSize parameter. In our new
             * implementation we just allocate an enlarged puddle. This is done
             * in order not to waste page tails beyond the allocated large block.
             * These tails will be used for our pool too. Their size is smaller
             * than the page size but they still perfectly fit for small allocations
             * (the primary use for pools).
             * Since our large block is also a puddle, it will be reused for our
             * pool when the block is freed. It can also be reused for another
             * large allocation, if it fits in.
             * Our final puddle size still includes MEMHEADER_TOTAL +
             * allocator ctx size in any case.
             */
            IPTR puddleSize = pool->pool.PuddleSize;

            if (memSize > puddleSize - (MEMHEADER_TOTAL + mhac_GetCtxSize()))
            {
                IPTR align = PrivExecBase(SysBase)->PageSize - 1;

                puddleSize = memSize + MEMHEADER_TOTAL + mhac_GetCtxSize();
                /* Align the size up to page boundary */
                puddleSize = (puddleSize + align) & ~align;
            }

            mh = AllocMemHeader(puddleSize, flags, loc, SysBase);
            D(bug("[InternalAllocPooled] Allocated new puddle 0x%p, size %u\n", mh, puddleSize));

            /* No memory left? */
            if (mh == NULL)
                break;

            /* Add the new puddle to our pool */
            mhac_PoolMemHeaderSetup(mh, pool);
            Enqueue((struct List *)&pool->pool.PuddleList, &mh->mh_Node);

            /* Fall through to get the memory */
        }
        else
        {
            /* Ignore existing MemHeaders with free memory smaller than the allocation */
            if (mh->mh_Free < memSize)
            {
                mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
                continue;
            }

            /* Ignore existing MemHeaders with a memory type that differs from the requested one */
            if (physFlags & ~mh->mh_Attributes)
            {
                D(bug("[InternalAllocPooled] Wrong flags for puddle 0x%p (wanted 0x%08X, have 0x%08X)\n", mh, flags, mh->mh_Attributes));

                mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
                continue;
            }
        }

        /* Try to get the memory */
        ret = stdAlloc(mh, mhac_PoolMemHeaderGetCtx(mh), memSize, flags, loc, SysBase);
        D(bug("[InternalAllocPooled] Allocated memory at 0x%p from puddle 0x%p\n", ret, mh));

        /* Got it? */
        if (ret != NULL)
        {
            /*
             * If this is not the first MemHeader and it has some free space,
             * move it forward (so that the next allocation will attempt to use it first).
             * IMPORTANT: We use a modification of Enqueue() because we still sort MemHeaders
             * according to their priority (which they inherit from system MemHeaders).
             * This allows us to have mixed pools (e.g. with both CHIP and FAST regions). This
             * will be needed in the future for memory protection.
             */
            if (mh->mh_Node.ln_Pred != NULL && mh->mh_Free > 32)
            {
                D(bug("[InternalAllocPooled] Re-sorting puddle list\n"));
                Remove(&mh->mh_Node);
                EnqueueMemHeader(&pool->pool.PuddleList, mh);
            }

            break;
        }

        /* No. Try next MemHeader */
        mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
    }

    if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
    {
        ReleaseSemaphore(&pool->sem);
    }

    if (ret)
    {
        /* Build munge walls if requested */
        ret = MungWall_Build(ret, pool, origSize, flags, loc, SysBase);

        /* Remember where we were allocated from */
        *((struct MemHeader **)ret) = mh;
        ret += sizeof(struct MemHeader *);
    }

    /* Everything fine */
    return ret;
}
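
/*
 * For illustration, the layout of a pooled allocation as produced above
 * (mungwall disabled): the struct MemHeader * of the puddle is stored in the
 * first pointer-sized slot, and the address returned to the caller points
 * just past it:
 *
 *   [ struct MemHeader * ][ caller's memSize bytes ... ]
 *   ^                     ^
 *   block from stdAlloc   pointer returned to the caller
 *
 * InternalFreePooled() below relies on this to find the puddle again.
 */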
/*
 * This is a pair to InternalAllocPooled().
 * This code is separated from FreePooled() in order to provide compatibility with various
 * memory tracking patches. If some exec code calls InternalAllocPooled() directly
 * (AllocMem() will do it), it has to call InternalFreePooled() directly as well.
 * Our chunks remember which pool they came from, so we don't need a pointer to the pool
 * header here. This will save us from headaches in a future FreeMem() implementation.
 */
void InternalFreePooled(APTR memory, IPTR memSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeader *mh;
    APTR freeStart;
    IPTR freeSize;

    D(bug("[exec] InternalFreePooled(0x%p, %u)\n", memory, memSize));

    if (!memory || !memSize) return;

    /* Get MemHeader pointer. It is stored right before our block. */
    freeStart = memory - sizeof(struct MemHeader *);
    freeSize = memSize + sizeof(struct MemHeader *);
    mh = *((struct MemHeader **)freeStart);

    /* Check walls first */
    freeStart = MungWall_Check(freeStart, freeSize, loc, SysBase);
    if (PrivExecBase(SysBase)->IntFlags & EXECF_MungWall)
        freeSize += MUNGWALL_TOTAL_SIZE;

    /* Verify that the MemHeader pointer is correct */
    if ((mh->mh_Node.ln_Type != NT_MEMORY) ||
        (freeStart < mh->mh_Lower) || (freeStart + freeSize > mh->mh_Upper))
    {
        /*
         * Something is wrong.
         * TODO: the following should actually be printed as part of the alert.
         * In future there should be some kind of "alert context". CPU alerts
         * (like illegal access) should remember the CPU context there. Memory manager
         * alerts (like this one) should remember some information of their own.
         */
        bug("[MM] Pool manager error\n");
        bug("[MM] Attempt to free %u bytes at 0x%p\n", memSize, memory);
        bug("[MM] The chunk does not belong to a pool\n");

        Alert(AN_BadFreeAddr);
    }
    else
    {
        struct ProtectedPool *pool = (struct ProtectedPool *)mhac_PoolMemHeaderGetPool(mh);
        IPTR size;

        if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
        {
            ObtainSemaphore(&pool->sem);
        }

        size = mh->mh_Upper - mh->mh_Lower;
        D(bug("[FreePooled] Allocated from puddle 0x%p, size %u\n", mh, size));

        /* Free the memory. */
        stdDealloc(mh, mhac_PoolMemHeaderGetCtx(mh), freeStart, freeSize, loc, SysBase);
        D(bug("[FreePooled] Deallocated chunk, %u free bytes in the puddle\n", mh->mh_Free));

        /* Is this MemHeader completely free now? */
        if (mh->mh_Free == size)
        {
            D(bug("[FreePooled] Puddle is empty, giving back to the system\n"));

            /* Yes. Remove it from the list. */
            Remove(&mh->mh_Node);
            /* And free it. */
            FreeMemHeader(mh, loc, SysBase);
        }
        /* All done. */

        if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
        {
            ReleaseSemaphore(&pool->sem);
        }
    }
}
ULONG checkMemHandlers(struct checkMemHandlersState *cmhs, struct ExecBase *SysBase)
{
    struct Node      *tmp;
    struct Interrupt *lmh;

    if (cmhs->cmhs_Data.memh_RequestFlags & MEMF_NO_EXPUNGE)
        return MEM_DID_NOTHING;

    /* In order to keep things clean, we must run in a single thread */
    ObtainSemaphore(&PrivExecBase(SysBase)->LowMemSem);

    /*
     * Loop over low memory handlers. Handlers can remove
     * themselves from the list while being invoked, thus
     * we need to be careful!
     */
    for (lmh = (struct Interrupt *)cmhs->cmhs_CurNode;
         (tmp = lmh->is_Node.ln_Succ);
         lmh = (struct Interrupt *)(cmhs->cmhs_CurNode = tmp))
    {
        ULONG ret;

        ret = AROS_UFC3 (LONG, lmh->is_Code,
                  AROS_UFCA(struct MemHandlerData *, &cmhs->cmhs_Data, A0),
                  AROS_UFCA(APTR,                     lmh->is_Data,    A1),
                  AROS_UFCA(struct ExecBase *,        SysBase,         A6)
              );

        if (ret == MEM_TRY_AGAIN)
        {
            /* MemHandler said it did something. Try again. */
            /* Is there any program that depends on this flag??? */
            cmhs->cmhs_Data.memh_Flags |= MEMHF_RECYCLE;

            ReleaseSemaphore(&PrivExecBase(SysBase)->LowMemSem);
            return MEM_TRY_AGAIN;
        }
        else
        {
            /* Nothing more to expect from this handler. */
            cmhs->cmhs_Data.memh_Flags &= ~MEMHF_RECYCLE;
        }
    }

    ReleaseSemaphore(&PrivExecBase(SysBase)->LowMemSem);
    return MEM_DID_NOTHING;
}