[AROS.git] / rom / exec / memory.c
/*
    Copyright © 1995-2017, The AROS Development Team. All rights reserved.
    $Id$
*/
#include <aros/debug.h>
#include <exec/rawfmt.h>
#include <exec/memheaderext.h>

#include "exec_intern.h"
#include "exec_util.h"
#include "etask.h"
#include "memory.h"
#include "mungwall.h"
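
/*
 * DMH() wraps the verbose debug output of the routines below (e.g. in
 * AllocMemHeader()); it is defined empty here, so those bug() calls
 * compile out.
 */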
#define DMH(x)
/*
 * Find the MemHeader to which address belongs.
 * This function is legal to be called in supervisor mode (we use TypeOfMem()
 * in order to validate addresses in tons of places). So, here are checks.
 */
struct MemHeader *FindMem(APTR address, struct ExecBase *SysBase)
{
    int usermode = (KernelBase != NULL) && (KrnIsSuper() == 0);
    struct MemHeader *mh;

    /* Nobody should change the memory list now. */
    if (usermode) MEM_LOCK_SHARED;

    /* Follow the list of MemHeaders */
    mh = (struct MemHeader *)SysBase->MemList.lh_Head;

    while (mh->mh_Node.ln_Succ != NULL)
    {
        if (IsManagedMem(mh))
        {
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

            if (mhe->mhe_InBounds)
            {
                if (mhe->mhe_InBounds(mhe, address, address))
                {
                    if (usermode) MEM_UNLOCK;
                    return mh;
                }
            }
        }
        else
        {
            /* Check if this MemHeader fits */
            if (address >= mh->mh_Lower && address < mh->mh_Upper)
            {
                /* Yes. Return it. */
                if (usermode) MEM_UNLOCK;
                return mh;
            }
        }

        /* Go to the next MemHeader */
        mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
    }

    if (usermode) MEM_UNLOCK;
    return NULL;
}
char *FormatMMContext(char *buffer, struct MMContext *ctx, struct ExecBase *SysBase)
{
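    /*
     * Note on the "- 1" below (assumed RAWFMTFUNC_STRING semantics): the
     * formatted text plus its terminating NUL is written into 'buffer' and
     * the returned pointer is just past that NUL; stepping back one byte
     * leaves 'buffer' on the NUL so each subsequent call appends.
     */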
    if (ctx->addr)
        buffer = NewRawDoFmt("In %s, block at 0x%p, size %lu", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->func, ctx->addr, ctx->size) - 1;
    else
        buffer = NewRawDoFmt("In %s, size %lu", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->func, ctx->size) - 1;

    if (ctx->mc)
    {
        buffer = NewRawDoFmt("\nCorrupted MemChunk 0x%p (next 0x%p, size %lu)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mc, ctx->mc->mc_Next, ctx->mc->mc_Bytes) - 1;

        if (ctx->mcPrev)
            buffer = NewRawDoFmt("\nPrevious MemChunk 0x%p (next 0x%p, size %lu)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mcPrev, ctx->mcPrev->mc_Next, ctx->mcPrev->mc_Bytes) - 1;
    }

    /* Print MemHeader details */
    buffer = NewRawDoFmt("\nMemHeader 0x%p (0x%p - 0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh, ctx->mh->mh_Lower, ctx->mh->mh_Upper) - 1;
    if ((IPTR)ctx->mh->mh_First & (MEMCHUNK_TOTAL - 1))
        buffer = NewRawDoFmt("\n- Unaligned first chunk address (0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_First) - 1;

    if (ctx->mh->mh_Free & (MEMCHUNK_TOTAL - 1))
        buffer = NewRawDoFmt("\n- Unaligned free space count (0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_Free) - 1;

    if (ctx->mh->mh_First)
    {
        if ((APTR)ctx->mh->mh_First < ctx->mh->mh_Lower)
            buffer = NewRawDoFmt("\n- First chunk (0x%p) below lower address", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_First) - 1;

        if ((APTR)ctx->mh->mh_First + ctx->mh->mh_Free > ctx->mh->mh_Upper)
            buffer = NewRawDoFmt("\n- Free space count too large (%lu, first chunk 0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_Free, ctx->mh->mh_First) - 1;
    }

    return buffer;
}
/* #define NO_ALLOCATOR_CONTEXT */

#ifdef NO_ALLOCATOR_CONTEXT

struct MemHeaderAllocatorCtx * mhac_GetSysCtx(struct MemHeader * mh, struct ExecBase * SysBase)
{
    return NULL;
}

void mhac_PoolMemHeaderSetup(struct MemHeader * mh, struct ProtectedPool * pool)
{
    mh->mh_Node.ln_Name = (STRPTR)pool;
}

ULONG mhac_GetCtxSize()
{
    return 0;
}

void mhac_ClearSysCtx(struct MemHeader * mh, struct ExecBase * SysBase)
{
}

#define mhac_MemChunkClaimed(a, b)
#define mhac_IsIndexEmpty(a)                (TRUE)
#define mhac_ClearIndex(a)
#define mhac_MemChunkCreated(a, b, c)       { (void)b; }
#define mhac_GetBetterPrevMemChunk(a, b, c) (a)
#define mhac_GetCloserPrevMemChunk(a, b, c) (a)
#define mhac_PoolMemHeaderGetCtx(a)         (NULL)
#define mhac_PoolMemHeaderGetPool(a)        (a->mh_Node.ln_Name)

#else

/* Allocator optimization support */
/*
 * The array contains pointers to the chunk previous to the first chunk of at
 * least size N, where
 *
 * N = 1 << (FIRSTPOTBIT + (i * POTSTEP)), i is the index in the array,
 * and "first" means the MemChunk with the lowest address.
 *
 * Each chunk in the array locates the place where the search should start,
 * not necessarily where the allocation should happen.
 *
 * If a chunk is taken from the MemHeader and is present in the index, it must
 * be removed from the index.
 *
 * If a chunk is returned to the MemHeader, it may be registered with the index.
 */
#define FIRSTPOTBIT           (5)
#define FIRSTPOT              (1 << FIRSTPOTBIT)
#define POTSTEP               (1)  /* Distance between each level */
#define ALLOCATORCTXINDEXSIZE (10) /* Number of levels in index   */
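
/*
 * With the values above, the index has up to ALLOCATORCTXINDEXSIZE = 10
 * levels: level i tracks free chunks of at least 1 << (FIRSTPOTBIT + i)
 * bytes, i.e. 32, 64, 128, ... up to 16384 bytes.
 */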
struct MemHeaderAllocatorCtx
{
    struct Node         mhac_Node;
    struct MemHeader    *mhac_MemHeader;
    APTR                mhac_Data1;

    ULONG               mhac_IndexSize;
    struct MemChunk     *mhac_PrevChunks[ALLOCATORCTXINDEXSIZE];
};

ULONG mhac_GetCtxSize()
{
    return (AROS_ROUNDUP2(sizeof(struct MemHeaderAllocatorCtx), MEMCHUNK_TOTAL));
}

static BOOL mhac_IsIndexEmpty(struct MemHeaderAllocatorCtx * mhac)
{
    LONG i;

    if (!mhac)
        return TRUE;

    for (i = 0; i < mhac->mhac_IndexSize; i++)
        if (mhac->mhac_PrevChunks[i] != NULL)
            return FALSE;

    return TRUE;
}

static void mhac_ClearIndex(struct MemHeaderAllocatorCtx * mhac)
{
    LONG i;

    if (!mhac)
        return;

    for (i = 0; i < ALLOCATORCTXINDEXSIZE; i++)
        mhac->mhac_PrevChunks[i] = NULL;
}

static void mhac_SetupMemHeaderAllocatorCtx(struct MemHeader * mh, ULONG maxindexsize,
    struct MemHeaderAllocatorCtx * mhac)
{
    /* Adjust the index size to the space in the MemHeader */
    IPTR size = (IPTR)mh->mh_Upper - (IPTR)mh->mh_Lower;
    LONG indexsize = 0;

    size = size >> FIRSTPOTBIT;
    size = size >> POTSTEP;

    for (; size > 0; size = size >> POTSTEP) indexsize++;

    if (indexsize < 0) indexsize = 0;
    if (indexsize > maxindexsize) indexsize = maxindexsize;
    if (indexsize > ALLOCATORCTXINDEXSIZE) indexsize = ALLOCATORCTXINDEXSIZE;

    mhac->mhac_MemHeader = mh;
    mhac->mhac_IndexSize = indexsize;
    mhac_ClearIndex(mhac);
}
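
/*
 * Illustrative example: for a 1 MiB MemHeader, size >> FIRSTPOTBIT >> POTSTEP
 * is 16384 = 2^14, so the loop above counts 15 shifts; the result is then
 * clamped to maxindexsize and to ALLOCATORCTXINDEXSIZE (10).
 */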
void mhac_ClearSysCtx(struct MemHeader * mh, struct ExecBase * SysBase)
{
    struct MemHeaderAllocatorCtx * mhac = NULL;

    ForeachNode(&PrivExecBase(SysBase)->AllocatorCtxList, mhac)
    {
        if (mhac->mhac_MemHeader == mh)
        {
            mhac_ClearIndex(mhac);
            break;
        }
    }
}

struct MemHeaderAllocatorCtx * mhac_GetSysCtx(struct MemHeader * mh, struct ExecBase * SysBase)
{
    struct MemHeaderAllocatorCtx * mhac = NULL;

    ForeachNode(&PrivExecBase(SysBase)->AllocatorCtxList, mhac)
    {
        if (mhac->mhac_MemHeader == mh)
            return mhac;
    }

    /* A new context is needed */
    mhac = Allocate(mh, sizeof(struct MemHeaderAllocatorCtx));
    mhac_SetupMemHeaderAllocatorCtx(mh, ALLOCATORCTXINDEXSIZE, mhac);
    AddTail(&PrivExecBase(SysBase)->AllocatorCtxList, (struct Node *)mhac);

    return mhac;
}

static void mhac_MemChunkClaimed(struct MemChunk * mc, struct MemHeaderAllocatorCtx * mhac)
{
    LONG i;

    if (!mhac)
        return;

    for (i = 0; i < mhac->mhac_IndexSize; i++)
    {
        if (mhac->mhac_PrevChunks[i] != NULL &&
            (mhac->mhac_PrevChunks[i] == mc || mhac->mhac_PrevChunks[i]->mc_Next == mc))
        {
            mhac->mhac_PrevChunks[i] = NULL;
        }
    }
}
static LONG mhac_CalcIndex(LONG size, ULONG indexsize)
{
    LONG r = 0;
    size >>= FIRSTPOTBIT;
    while (size >>= 1)
        r++;

    if (r > indexsize - 1) r = indexsize - 1;

    return r;
}
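
/*
 * Example: with FIRSTPOTBIT = 5, size = 100 gives 100 >> 5 = 3; one further
 * shift leaves a non-zero value, the next one ends the loop, so r = 1 and
 * the search starts at the level for chunks of at least 64 bytes.
 */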
static void mhac_MemChunkCreated(struct MemChunk * mc, struct MemChunk *mcprev, struct MemHeaderAllocatorCtx * mhac)
{
    LONG i, v = FIRSTPOT;

    if (mc->mc_Bytes < FIRSTPOT) /* Allocation too small for index */
        return;

    if (!mhac)
        return;

    for (i = 0; i < mhac->mhac_IndexSize; i++, v = v << POTSTEP)
    {
        if (mc->mc_Bytes < v)
            break; /* Chunk smaller than index at i. Stop */

        /* If there is no chunk in the index, or the passed chunk has a lower address than the indexed one */
        if (mhac->mhac_PrevChunks[i] == NULL ||
            (mhac->mhac_PrevChunks[i] != NULL && mhac->mhac_PrevChunks[i]->mc_Next > mc))
        {
            mhac->mhac_PrevChunks[i] = mcprev;
        }
    }
}
/*
 * General idea:
 * The function returns a pointer to the chunk previous to one from which a
 * chunk big enough for the allocation can be located quickly. It never
 * returns NULL.
 * Current implementation:
 * The function returns the chunk previous to the first (lowest-address)
 * chunk of the largest indexed size class not bigger than the requested size.
 */
static struct MemChunk * mhac_GetBetterPrevMemChunk(struct MemChunk * prev, IPTR size, struct MemHeaderAllocatorCtx * mhac)
{
    struct MemChunk * _return = prev;

    if (size < FIRSTPOT)
        return _return; /* Allocation too small for index */

    if (mhac)
    {
        LONG i;
        LONG ii = mhac_CalcIndex(size, mhac->mhac_IndexSize);

        if (mhac->mhac_PrevChunks[ii] != NULL)
            _return = mhac->mhac_PrevChunks[ii];
        else
        {
            for (i = ii - 1; i >= 0; i--)
            {
                if (mhac->mhac_PrevChunks[i] != NULL)
                {
                    _return = mhac->mhac_PrevChunks[i];
                    break;
                }
            }
        }
    }

    return _return;
}
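
/*
 * Pick, from the index, the starting chunk whose successor lies closest
 * below addr; this shortens the list walk when freeing in stdDealloc().
 */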
static struct MemChunk * mhac_GetCloserPrevMemChunk(struct MemChunk * prev, APTR addr, struct MemHeaderAllocatorCtx * mhac)
{
    struct MemChunk * _return = prev;

    if (mhac)
    {
        LONG i;

        for (i = 0; i < mhac->mhac_IndexSize; i++)
        {
            if (mhac->mhac_PrevChunks[i] != NULL &&
                (APTR)mhac->mhac_PrevChunks[i]->mc_Next < addr &&
                mhac->mhac_PrevChunks[i]->mc_Next > _return->mc_Next)
            {
                _return = mhac->mhac_PrevChunks[i];
            }
        }
    }

    return _return;
}
/*
 * Enhance a MemHeader that is part of a pool with a MemHeaderAllocatorCtx
 */
void mhac_PoolMemHeaderSetup(struct MemHeader * mh, struct ProtectedPool * pool)
{
    struct MemHeaderAllocatorCtx * mhac = Allocate(mh, sizeof(struct MemHeaderAllocatorCtx));

    mhac_SetupMemHeaderAllocatorCtx(mh, 5, mhac);

    mhac->mhac_Data1 = pool;
    mh->mh_Node.ln_Name = (STRPTR)mhac;
}

#define mhac_PoolMemHeaderGetCtx(a)  ((struct MemHeaderAllocatorCtx *)(a->mh_Node.ln_Name))
#define mhac_PoolMemHeaderGetPool(a) (mhac_PoolMemHeaderGetCtx(a)->mhac_Data1)
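
/*
 * In both variants the puddle's ln_Name field doubles as a back pointer:
 * directly to the owning pool when allocator contexts are disabled, or to
 * the context (which stores the pool in mhac_Data1) when they are enabled.
 */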
#endif

#ifdef NO_CONSISTENCY_CHECKS

#define validateHeader(mh, op, addr, size, tp, SysBase)          TRUE
#define validateChunk(mc, prev, mh, op, addr, size, tp, SysBase) TRUE

#else
static ULONG memAlerts[] =
{
    AT_DeadEnd|AN_MemoryInsane, /* MM_ALLOC   */
    AT_DeadEnd|AN_MemCorrupt,   /* MM_FREE    */
    AN_FreeTwice                /* MM_OVERLAP */
};

/*
 * MemHeader validation routine. Rules are:
 *
 * 1. Both mh_First and mh_Free must be MEMCHUNK_TOTAL-aligned.
 * 2. Free space (if present) must completely fit in between mh_Lower and mh_Upper.
 *
 * We intentionally don't check the header's own location. We assume that in future we'll
 * be able to put MEMF_CHIP headers inside MEMF_FAST memory, for speed up.
 */
static BOOL validateHeader(struct MemHeader *mh, UBYTE op, APTR addr, IPTR size, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    if (((IPTR)mh->mh_First & (MEMCHUNK_TOTAL - 1)) || (mh->mh_Free & (MEMCHUNK_TOTAL - 1)) ||        /* 1 */
        (mh->mh_First &&
         (((APTR)mh->mh_First < mh->mh_Lower) || ((APTR)mh->mh_First + mh->mh_Free > mh->mh_Upper)))) /* 2 */
    {
        /* TraceLocation is not supplied by PrepareExecBase(); in that case fail silently. */
        if (tp)
        {
            struct MMContext alertData;

            alertData.mh     = mh;
            alertData.mc     = NULL;
            alertData.mcPrev = NULL;
            alertData.func   = tp->function;
            alertData.addr   = addr;
            alertData.size   = size;
            alertData.op     = op;

            Exec_ExtAlert(memAlerts[op], tp->caller, tp->stack, AT_MEMORY, &alertData, SysBase);
        }

        /*
         * Theoretically during very early boot we can fail to post an alert (no KernelBase yet).
         * In this case we return with fault indication.
         */
        return FALSE;
    }
    return TRUE;
}
/*
 * MemChunk consistency check. Rules are:
 *
 * 1. Both mc_Next and mc_Bytes must be MEMCHUNK_TOTAL-aligned, and mc_Bytes cannot be zero.
 * 2. The end of this chunk must not be greater than mh->mh_Upper.
 * 3. mc_Next (if present) must point in between the end of this chunk and mh->mh_Upper - MEMCHUNK_TOTAL.
 *    There must be at least MEMCHUNK_TOTAL allocated bytes between free chunks.
 *
 * This function is inlined for speed improvements.
 */
static inline BOOL validateChunk(struct MemChunk *p2, struct MemChunk *p1, struct MemHeader *mh,
                                 UBYTE op, APTR addr, IPTR size,
                                 struct TraceLocation *tp, struct ExecBase *SysBase)
{
    if (((IPTR)p2->mc_Next & (MEMCHUNK_TOTAL-1)) || (p2->mc_Bytes == 0) || (p2->mc_Bytes & (MEMCHUNK_TOTAL-1)) || /* 1 */
        ((APTR)p2 + p2->mc_Bytes > mh->mh_Upper) ||                                                               /* 2 */
        (p2->mc_Next && (((APTR)p2->mc_Next < (APTR)p2 + p2->mc_Bytes + MEMCHUNK_TOTAL) ||                        /* 3 */
                         ((APTR)p2->mc_Next > mh->mh_Upper - MEMCHUNK_TOTAL))))
    {
        if (tp)
        {
            struct MMContext alertData;

            alertData.mh     = mh;
            alertData.mc     = p2;
            alertData.mcPrev = (p1 == (struct MemChunk *)&mh->mh_First) ? NULL : p1;
            alertData.func   = tp->function;
            alertData.addr   = addr;
            alertData.size   = size;
            alertData.op     = op;

            Exec_ExtAlert(memAlerts[op], tp->caller, tp->stack, AT_MEMORY, &alertData, SysBase);
        }
        return FALSE;
    }

    return TRUE;
}

#endif
/*
 * Allocate a block from the given MemHeader in a specific way.
 * This routine can be called with SysBase = NULL.
 *
 * MemHeaderAllocatorCtx:
 * This parameter is optional; allocation needs to work without it as well.
 * However, once it has been passed for a given MemHeader, it needs to be
 * passed in all subsequent calls.
 */
APTR stdAlloc(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR size,
    ULONG requirements, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    /*
     * The check has to be done for the second time. Exec uses stdAlloc on memheader
     * passed upon startup. This is bad, very bad. So here a temporary hack :)
     */
    if ((mh->mh_Node.ln_Type == NT_MEMORY) && IsManagedMem(mh))
    {
        struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

        if (mhe->mhe_Alloc)
        {
            return mhe->mhe_Alloc(mhe, size, &requirements);
        }
        else
            return NULL;
    }
    else
    {
        /* First round byteSize up to a multiple of MEMCHUNK_TOTAL */
        IPTR byteSize = AROS_ROUNDUP2(size, MEMCHUNK_TOTAL);
        struct MemChunk *mc = NULL, *p1, *p2;

        /* Validate the MemHeader before doing anything. */
        if (!validateHeader(mh, MM_ALLOC, NULL, size, tp, SysBase))
            return NULL;

        /* Validate if there is even enough total free memory */
        if (mh->mh_Free < byteSize)
            return NULL;

        /*
         * The free memory list is only single linked, i.e. to remove
         * elements from the list I need the node's predecessor. For the
         * first element I can use mh->mh_First instead of a real predecessor.
         */
        p1 = mhac_GetBetterPrevMemChunk((struct MemChunk *)&mh->mh_First, size, mhac);
        p2 = p1->mc_Next;

        /*
         * Follow the memory list. p1 is the previous MemChunk, p2 is the current one.
         * On the 1st pass p1 points to mh->mh_First, so that changing p1->mc_Next actually
         * changes mh->mh_First.
         */
        while (p2 != NULL)
        {
            /* Validate the current chunk */
            if (!validateChunk(p2, p1, mh, MM_ALLOC, NULL, size, tp, SysBase))
                return NULL;

            /* Check if the current block is large enough */
            if (p2->mc_Bytes >= byteSize)
            {
                /* It is. */
                mc = p1;

                /* Use this one if MEMF_REVERSE is not set. */
                if (!(requirements & MEMF_REVERSE))
                    break;
                /* Else continue - there may be more to come. */
            }

            /* Go to next block */
            p1 = p2;
            p2 = p1->mc_Next;
        }

        /* Something found? */
        if (mc != NULL)
        {
            /* Remember: if MEMF_REVERSE is set p1 and p2 are now invalid. */
            p1 = mc;
            p2 = p1->mc_Next;

            mhac_MemChunkClaimed(p2, mhac);

            /* Remove the block from the list and return it. */
            if (p2->mc_Bytes == byteSize)
            {
                /* Fits exactly. Just relink the list. */
                p1->mc_Next = p2->mc_Next;
                mc = p2;
            }
            else
            {
                struct MemChunk * pp = p1;

                if (requirements & MEMF_REVERSE)
                {
                    /* Return the last bytes. */
                    p1->mc_Next = p2;
                    mc = (struct MemChunk *)((UBYTE *)p2 + p2->mc_Bytes - byteSize);
                }
                else
                {
                    /* Return the first bytes. */
                    p1->mc_Next = (struct MemChunk *)((UBYTE *)p2 + byteSize);
                    mc = p2;
                }

                p1 = p1->mc_Next;
                p1->mc_Next = p2->mc_Next;
                p1->mc_Bytes = p2->mc_Bytes - byteSize;

                mhac_MemChunkCreated(p1, pp, mhac);
            }

            mh->mh_Free -= byteSize;

            /* Clear the block if requested */
            if (requirements & MEMF_CLEAR)
                memset(mc, 0, byteSize);
        }
        else
        {
            if (!mhac_IsIndexEmpty(mhac))
            {
                /*
                 * Since chunks created during deallocation are not returned to the index,
                 * retry with a cleared index.
                 */
                mhac_ClearIndex(mhac);
                mc = stdAlloc(mh, mhac, size, requirements, tp, SysBase);
            }
        }

        return mc;
    }
}
/*
 * Free 'byteSize' bytes starting at 'memoryBlock' belonging to MemHeader 'freeList'.
 *
 * MemHeaderAllocatorCtx:
 * See stdAlloc().
 */
void stdDealloc(struct MemHeader *freeList, struct MemHeaderAllocatorCtx *mhac, APTR addr, IPTR size, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    APTR memoryBlock;
    IPTR byteSize;
    struct MemChunk *p1, *p2, *p3;
    UBYTE *p4;

    if ((freeList->mh_Node.ln_Type == NT_MEMORY) && IsManagedMem(freeList))
    {
        struct MemHeaderExt *mhe = (struct MemHeaderExt *)freeList;

        if (mhe->mhe_Free)
            mhe->mhe_Free(mhe, addr, size);
    }
    else
    {
        /* Make sure the MemHeader is OK */
        if (!validateHeader(freeList, MM_FREE, addr, size, tp, SysBase))
            return;

        /* Align the size to the requirements */
        byteSize = size + ((IPTR)addr & (MEMCHUNK_TOTAL - 1));
        byteSize = (byteSize + MEMCHUNK_TOTAL - 1) & ~(MEMCHUNK_TOTAL - 1);
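
        /*
         * Example (illustrative, assuming MEMCHUNK_TOTAL is 16): freeing
         * 20 bytes at address 0x1004 gives byteSize = 20 + 4 = 24, rounded
         * up to 32, while memoryBlock below becomes 0x1000.
         */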
        /* Align the block as well */
        memoryBlock = (APTR)((IPTR)addr & ~(MEMCHUNK_TOTAL - 1));

        /*
            The free memory list is only single linked, i.e. to insert
            elements into the list I need the node as well as its
            predecessor. For the first element I can use freeList->mh_First
            instead of a real predecessor.
        */
        p1 = (struct MemChunk *)&freeList->mh_First;
        p2 = freeList->mh_First;

        /* Start and end(+1) of the block */
        p3 = (struct MemChunk *)memoryBlock;
        p4 = (UBYTE *)p3 + byteSize;

        /* No chunk in list? Just insert the current one and return. */
        if (p2 == NULL)
        {
            p3->mc_Bytes = byteSize;
            p3->mc_Next = NULL;
            p1->mc_Next = p3;
            freeList->mh_Free += byteSize;
            return;
        }

        /* Find a closer chunk */
        p1 = mhac_GetCloserPrevMemChunk(p1, addr, mhac);
        p2 = p1->mc_Next;

        /* Follow the list to find a place where to insert our memory. */
        do
        {
            if (!validateChunk(p2, p1, freeList, MM_FREE, addr, size, tp, SysBase))
                return;

            /* Found a block with a higher address? */
            if (p2 >= p3)
            {
#if !defined(NO_CONSISTENCY_CHECKS)
                /*
                    If the memory to be freed overlaps with the current
                    block something must be wrong.
                */
                if (p4 > (UBYTE *)p2)
                {
                    bug("[MM] Chunk allocator error\n");
                    bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
                    bug("[MM] Block overlaps (1) with chunk 0x%p (%u bytes)\n", p2, p2->mc_Bytes);

                    Alert(AN_FreeTwice);
                    return;
                }
#endif
                /* End the loop with p2 non-zero */
                break;
            }

            /* goto next block */
            p1 = p2;
            p2 = p2->mc_Next;

            /* If the loop ends with p2 zero add it at the end. */
        } while (p2 != NULL);

        /* If there was a previous block merge with it. */
        if (p1 != (struct MemChunk *)&freeList->mh_First)
        {
#if !defined(NO_CONSISTENCY_CHECKS)
            /* Check if they overlap. */
            if ((UBYTE *)p1 + p1->mc_Bytes > (UBYTE *)p3)
            {
                bug("[MM] Chunk allocator error\n");
                bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
                bug("[MM] Block overlaps (2) with chunk 0x%p (%u bytes)\n", p1, p1->mc_Bytes);

                Alert(AN_FreeTwice);
                return;
            }
#endif
            /* Merge if possible */
            if ((UBYTE *)p1 + p1->mc_Bytes == (UBYTE *)p3)
            {
                mhac_MemChunkClaimed(p1, mhac);
                p3 = p1;
                /*
                 * Note: this case does not lead to mhac_MemChunkCreated, because
                 * we don't have the chunk previous to p1
                 */
            }
            else
                /* Not possible to merge */
                p1->mc_Next = p3;
        }
        else
        {
            /*
                There was no previous block. Just insert the memory at
                the start of the list.
            */
            p1->mc_Next = p3;
        }

        /* Try to merge with the next block (if there is one ;-) ). */
        if (p4 == (UBYTE *)p2 && p2 != NULL)
        {
            /*
               Overlap checking already done. Doing it here after
               the list potentially changed would be a bad idea.
            */
            mhac_MemChunkClaimed(p2, mhac);
            p4 += p2->mc_Bytes;
            p2 = p2->mc_Next;
        }

        /* relink the list and return. */
        p3->mc_Next = p2;
        p3->mc_Bytes = p4 - (UBYTE *)p3;
        freeList->mh_Free += byteSize;
        if (p1->mc_Next == p3) mhac_MemChunkCreated(p3, p1, mhac);
    }
}
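
/*
 * Illustrative sketch (not part of the original source): a minimal
 * allocate/free round trip through this allocator, assuming a valid,
 * unmanaged MemHeader 'mh' and a usable SysBase:
 *
 *     struct MemHeaderAllocatorCtx *ctx = mhac_GetSysCtx(mh, SysBase);
 *     APTR block = stdAlloc(mh, ctx, 256, MEMF_CLEAR, NULL, SysBase);
 *     if (block)
 *         stdDealloc(mh, ctx, block, 256, NULL, SysBase);
 */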
/*
 * TODO:
 * During the transition period the four routines below use the nommu allocator.
 * When the transition is complete they should use it only if the MMU
 * is inactive. Otherwise they should use KrnAllocPages()/KrnFreePages().
 */

/* Non-mungwalled AllocAbs(). Does not destroy sideways regions. */
APTR InternalAllocAbs(APTR location, IPTR byteSize, struct ExecBase *SysBase)
{
    return nommu_AllocAbs(location, byteSize, SysBase);
}

/*
 * Use this if you want to free a region allocated by InternalAllocAbs().
 * Otherwise you hit the mungwall problem (FreeMem() expects a header).
 */
void InternalFreeMem(APTR location, IPTR byteSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    nommu_FreeMem(location, byteSize, loc, SysBase);
}
/*
 * Allocate a region managed by its own header. The usable size is reduced by
 * the size of the header.
 */
APTR AllocMemHeader(IPTR size, ULONG flags, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeader *mh;

    mh = nommu_AllocMem(size, flags, loc, SysBase);
    DMH(bug("[AllocMemHeader] Allocated %u bytes at 0x%p\n", size, mh));

    if (mh)
    {
        struct MemHeader *orig = FindMem(mh, SysBase);

        if (IsManagedMem(orig))
        {
            struct MemHeaderExt *mhe_orig = (struct MemHeaderExt *)orig;
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;
            IPTR header_size = (sizeof(struct MemHeaderExt) + 15) & ~15;

            /* Copy the basic information */
            mh->mh_Node.ln_Type = NT_MEMORY;
            mh->mh_Node.ln_Pri  = orig->mh_Node.ln_Pri;
            mh->mh_Attributes   = orig->mh_Attributes;
            mh->mh_Upper        = (void *)mh + size;
            mh->mh_Lower        = (void *)mh;
            mh->mh_First        = NULL;
            mh->mh_Free         = 0;

            mhe->mhe_Magic      = mhe_orig->mhe_Magic;

            /* Copy init functions */
            mhe->mhe_InitPool    = mhe_orig->mhe_InitPool;
            mhe->mhe_DestroyPool = mhe_orig->mhe_DestroyPool;

            /* Copy memory allocation functions */
            mhe->mhe_Alloc    = mhe_orig->mhe_Alloc;
            mhe->mhe_AllocAbs = mhe_orig->mhe_AllocAbs;
            mhe->mhe_AllocVec = mhe_orig->mhe_AllocVec;
            mhe->mhe_Avail    = mhe_orig->mhe_Avail;
            mhe->mhe_Free     = mhe_orig->mhe_Free;
            mhe->mhe_FreeVec  = mhe_orig->mhe_FreeVec;
            mhe->mhe_InBounds = mhe_orig->mhe_InBounds;
            mhe->mhe_ReAlloc  = mhe_orig->mhe_ReAlloc;

            /*
             * User data will be initialized. The memory pool will get the first
             * region for free.
             */
            mhe->mhe_UserData = (APTR)mh + header_size;

            /* Initialize the pool with the rest of the size */
            if (mhe->mhe_InitPool)
                mhe->mhe_InitPool(mhe, size, size - header_size);
        }
        else
        {
            size -= MEMHEADER_TOTAL;

            /*
             * Initialize the new MemHeader.
             * Inherit attributes from the system MemHeader from which
             * our chunk was allocated.
             */
            mh->mh_Node.ln_Type = NT_MEMORY;
            mh->mh_Node.ln_Pri  = orig->mh_Node.ln_Pri;
            mh->mh_Attributes   = orig->mh_Attributes;
            mh->mh_Lower        = (APTR)mh + MEMHEADER_TOTAL;
            mh->mh_Upper        = mh->mh_Lower + size;
            mh->mh_First        = mh->mh_Lower;
            mh->mh_Free         = size;

            /* Create the first (and the only) MemChunk */
            mh->mh_First->mc_Next  = NULL;
            mh->mh_First->mc_Bytes = size;
        }
    }
    return mh;
}
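
/*
 * Resulting layout of an unmanaged region (illustrative):
 *
 *     [ MemHeader, MEMHEADER_TOTAL bytes ][ single free MemChunk ... ]
 *     ^ mh                                ^ mh->mh_Lower == mh->mh_First
 */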
/* Free a region allocated by AllocMemHeader() */
void FreeMemHeader(APTR addr, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeaderExt *mhe = (struct MemHeaderExt *)addr;
    IPTR size = (IPTR)mhe->mhe_MemHeader.mh_Upper - (IPTR)addr;

    if (IsManagedMem(mhe))
    {
        if (mhe->mhe_DestroyPool)
            mhe->mhe_DestroyPool(mhe);
    }

    DMH(bug("[FreeMemHeader] Freeing %u bytes at 0x%p\n", size, addr));
    nommu_FreeMem(addr, size, loc, SysBase);
}

/*
 * This is our own Enqueue() version. Currently the only difference is that
 * we insert our node before the first node with LOWER OR EQUAL priority,
 * so that for nodes with equal priority it will be a LIFO, not a FIFO, queue.
 * This speeds up the allocator.
 * TODO: implement secondary sorting by mh_Free. This will make it possible to
 * implement a best-match algorithm (so that puddles with smaller free space
 * will be picked up first). This way the smallest allocations will reuse the
 * smallest chunks instead of fragmenting large ones.
 */
static void EnqueueMemHeader(struct MinList *list, struct MemHeader *mh)
{
    struct MemHeader *next;

    /* Look through the list */
    ForeachNode (list, next)
    {
        /*
            Look for the first MemHeader with a lower or equal pri than the
            node we have to insert into the list.
        */
        if (mh->mh_Node.ln_Pri >= next->mh_Node.ln_Pri)
            break;
    }

    /* Insert the node before next */
    mh->mh_Node.ln_Pred = next->mh_Node.ln_Pred;
    mh->mh_Node.ln_Succ = &next->mh_Node;
    next->mh_Node.ln_Pred->ln_Succ = &mh->mh_Node;
    next->mh_Node.ln_Pred = &mh->mh_Node;
}

/*
 * Allocate memory with the given physical properties from the given pool.
 * Our pools can be mixed. This means that different puddles from the
 * pool can have different physical flags. For example the same pool
 * can contain puddles from both CHIP and FAST memory. This is done in
 * order to provide a single system default pool for all types of memory.
 */
APTR InternalAllocPooled(APTR poolHeader, IPTR memSize, ULONG flags, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct ProtectedPool *pool = poolHeader + MEMHEADER_TOTAL;
    APTR ret = NULL;
    IPTR origSize;
    struct MemHeader *mh;

    D(bug("[exec] InternalAllocPooled(0x%p, %u, 0x%08X), header 0x%p\n", poolHeader, memSize, flags, pool));

    /*
     * Memory blocks allocated from the pool store pointers to the MemHeader they were
     * allocated from. This is done in order to avoid slow lookups in InternalFreePooled().
     * This is done in an AllocVec()-alike manner; the pointer is placed right before the block.
     */
    memSize += sizeof(struct MemHeader *);
    origSize = memSize;
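
    /*
     * Resulting block layout (the pointer is stashed in front of the data):
     *
     *     [ struct MemHeader * ][ user data ... ]
     *     ^ chunk start          ^ pointer returned to the caller
     */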
    /* If mungwall is enabled, also count the size of the walls */
    if (PrivExecBase(SysBase)->IntFlags & EXECF_MungWall)
        memSize += MUNGWALL_TOTAL_SIZE;

    if (pool->pool.PoolMagic != POOL_MAGIC)
    {
        PoolManagerAlert(PME_ALLOC_INV_POOL, AT_DeadEnd, memSize, NULL, NULL, poolHeader);
    }

    if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
    {
        ObtainSemaphore(&pool->sem);
    }

    /* Follow the list of MemHeaders */
    mh = (struct MemHeader *)pool->pool.PuddleList.mlh_Head;
    for (;;)
    {
        ULONG physFlags = flags & MEMF_PHYSICAL_MASK;

        /* Are there no more MemHeaders? */
        if (mh->mh_Node.ln_Succ == NULL)
        {
            /*
             * Get a new one.
             * Usually we allocate puddles of default size, specified during
             * pool creation. However we can be asked to allocate a block whose
             * size will be larger than the default puddle size.
             * Previously this was handled by the threshSize parameter. In our new
             * implementation we just allocate an enlarged puddle. This is done
             * in order not to waste page tails beyond the allocated large block.
             * These tails will be used for our pool too. Their size is smaller
             * than the page size but they still perfectly fit for small allocations
             * (the primary use for pools).
             * Since our large block is also a puddle, it will be reused for our
             * pool when the block is freed. It can also be reused for another
             * large allocation, if it fits in.
             * Our final puddle size still includes MEMHEADER_TOTAL +
             * allocator ctx size in any case.
             */
            IPTR puddleSize = pool->pool.PuddleSize;

            if (memSize > puddleSize - (MEMHEADER_TOTAL + mhac_GetCtxSize()))
            {
                IPTR align = PrivExecBase(SysBase)->PageSize - 1;

                puddleSize = memSize + MEMHEADER_TOTAL + mhac_GetCtxSize();
                /* Align the size up to page boundary */
                puddleSize = (puddleSize + align) & ~align;
            }

            mh = AllocMemHeader(puddleSize, flags, loc, SysBase);
            D(bug("[InternalAllocPooled] Allocated new puddle 0x%p, size %u\n", mh, puddleSize));

            /* No memory left? */
            if (mh == NULL)
                break;

            /* Add the new puddle to our pool */
            mhac_PoolMemHeaderSetup(mh, pool);
            Enqueue((struct List *)&pool->pool.PuddleList, &mh->mh_Node);

            /* Fall through to get the memory */
        }
        else
        {
            /* Ignore existing MemHeaders with free memory smaller than the allocation */
            if (mh->mh_Free < memSize)
            {
                mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
                continue;
            }

            /* Ignore existing MemHeaders with a memory type that differs from the requested one */
            if (physFlags & ~mh->mh_Attributes)
            {
                D(bug("[InternalAllocPooled] Wrong flags for puddle 0x%p (wanted 0x%08X, have 0x%08X)\n", mh, flags, mh->mh_Attributes));

                mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
                continue;
            }
        }

        /* Try to get the memory */
        ret = stdAlloc(mh, mhac_PoolMemHeaderGetCtx(mh), memSize, flags, loc, SysBase);
        D(bug("[InternalAllocPooled] Allocated memory at 0x%p from puddle 0x%p\n", ret, mh));

        /* Got it? */
        if (ret != NULL)
        {
            /*
             * If this is not the first MemHeader and it has some free space,
             * move it forward (so that the next allocation will attempt to use it first).
             * IMPORTANT: We use a modification of Enqueue() because we still sort MemHeaders
             * according to their priority (which they inherit from system MemHeaders).
             * This allows us to have mixed pools (e.g. with both CHIP and FAST regions). This
             * will be needed in future for memory protection.
             */
            if (mh->mh_Node.ln_Pred != NULL && mh->mh_Free > 32)
            {
                D(bug("[InternalAllocPooled] Re-sorting puddle list\n"));
                Remove(&mh->mh_Node);
                EnqueueMemHeader(&pool->pool.PuddleList, mh);
            }

            break;
        }

        /* No. Try the next MemHeader */
        mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
    }

    if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
    {
        ReleaseSemaphore(&pool->sem);
    }

    if (ret)
    {
        /* Build munge walls if requested */
        ret = MungWall_Build(ret, pool, origSize, flags, loc, SysBase);

        /* Remember where we were allocated from */
        *((struct MemHeader **)ret) = mh;
        ret += sizeof(struct MemHeader *);
    }

    /* Everything fine */
    return ret;
}
/*
 * This is a pair to InternalAllocPooled().
 * This code is separated from FreePooled() in order to provide compatibility with various
 * memory tracking patches. If some exec code calls InternalAllocPooled() directly
 * (AllocMem() will do it), it also has to call InternalFreePooled() directly.
 * Our chunks remember which pool they came from, so we don't need a pointer to the pool
 * header here. This will save us from headaches in a future FreeMem() implementation.
 */
void InternalFreePooled(APTR poolHeader, APTR memory, IPTR memSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeader *mh;
    APTR freeStart;
    IPTR freeSize;

    D(bug("[exec] InternalFreePooled(0x%p, %u)\n", memory, memSize));

    if (!memory || !memSize) return;

    /* Get the MemHeader pointer. It is stored right before our block. */
    freeStart = memory - sizeof(struct MemHeader *);
    freeSize = memSize + sizeof(struct MemHeader *);
    mh = *((struct MemHeader **)freeStart);

    /* Check walls first */
    freeStart = MungWall_Check(freeStart, freeSize, loc, SysBase);
    if (PrivExecBase(SysBase)->IntFlags & EXECF_MungWall)
        freeSize += MUNGWALL_TOTAL_SIZE;

    /* Verify that the MemHeader pointer is correct */
    if ((mh->mh_Node.ln_Type != NT_MEMORY) ||
        (freeStart < mh->mh_Lower) || (freeStart + freeSize > mh->mh_Upper))
    {
        /* Something is wrong. */
        PoolManagerAlert(PME_FREE_NO_CHUNK, 0, memSize, memory, NULL, NULL);
    }
    else
    {
        struct ProtectedPool *pool = (struct ProtectedPool *)mhac_PoolMemHeaderGetPool(mh);
        IPTR size;
        APTR poolHeaderMH = (APTR)((IPTR)pool - MEMHEADER_TOTAL);

        if (pool->pool.PoolMagic != POOL_MAGIC)
        {
            PoolManagerAlert(PME_FREE_INV_POOL, AT_DeadEnd, memSize, memory, poolHeaderMH, NULL);
        }

        if (poolHeaderMH != poolHeader)
        {
            PoolManagerAlert(PME_FREE_MXD_POOL, 0, memSize, memory, poolHeaderMH, poolHeader);
        }

        if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
        {
            ObtainSemaphore(&pool->sem);
        }

        size = mh->mh_Upper - mh->mh_Lower;
        D(bug("[FreePooled] Allocated from puddle 0x%p, size %u\n", mh, size));

        /* Free the memory. */
        stdDealloc(mh, mhac_PoolMemHeaderGetCtx(mh), freeStart, freeSize, loc, SysBase);
        D(bug("[FreePooled] Deallocated chunk, %u free bytes in the puddle\n", mh->mh_Free));

        /* Is this MemHeader completely free now? */
        if ((mh->mh_Free + mhac_GetCtxSize()) == size)
        {
            D(bug("[FreePooled] Puddle is empty, giving back to the system\n"));

            /* Yes. Remove it from the list. */
            Remove(&mh->mh_Node);

            /* And free it. */
            FreeMemHeader(mh, loc, SysBase);
        }
        /* All done. */

        if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
        {
            ReleaseSemaphore(&pool->sem);
        }
    }
}
ULONG checkMemHandlers(struct checkMemHandlersState *cmhs, struct ExecBase *SysBase)
{
    struct Node      *tmp;
    struct Interrupt *lmh;

    if (cmhs->cmhs_Data.memh_RequestFlags & MEMF_NO_EXPUNGE)
        return MEM_DID_NOTHING;

    /* In order to keep things clean, we must run in a single thread */
    ObtainSemaphore(&PrivExecBase(SysBase)->LowMemSem);

    /*
     * Loop over low memory handlers. Handlers can remove
     * themselves from the list while being invoked, thus
     * we need to be careful!
     */
    for (lmh = (struct Interrupt *)cmhs->cmhs_CurNode;
         (tmp = lmh->is_Node.ln_Succ);
         lmh = (struct Interrupt *)(cmhs->cmhs_CurNode = tmp))
    {
        ULONG ret;

        ret = AROS_UFC3 (LONG, lmh->is_Code,
                  AROS_UFCA(struct MemHandlerData *, &cmhs->cmhs_Data, A0),
                  AROS_UFCA(APTR,                    lmh->is_Data,     A1),
                  AROS_UFCA(struct ExecBase *,       SysBase,          A6)
              );

        if (ret == MEM_TRY_AGAIN)
        {
            /* The MemHandler said it did something. Try again. */
            /* Is there any program that depends on this flag??? */
            cmhs->cmhs_Data.memh_Flags |= MEMHF_RECYCLE;

            ReleaseSemaphore(&PrivExecBase(SysBase)->LowMemSem);
            return MEM_TRY_AGAIN;
        }
        else
        {
            /* Nothing more to expect from this handler. */
            cmhs->cmhs_Data.memh_Flags &= ~MEMHF_RECYCLE;
        }
    }

    ReleaseSemaphore(&PrivExecBase(SysBase)->LowMemSem);
    return MEM_DID_NOTHING;
}
void PoolManagerAlert(ULONG code, ULONG flags, IPTR memSize, APTR memory, APTR poolHeaderMH, APTR poolHeader)
{
    /*
     * TODO: the following should actually be printed as part of the alert.
     * In future there should be some kind of "alert context". CPU alerts
     * (like illegal access) should remember the CPU context there. Memory manager
     * alerts (like this one) should remember some own information.
     */

    bug("[MM] Pool manager error\n");
    switch (code)
    {
    case PME_FREE_NO_CHUNK:
    case PME_FREE_INV_POOL:
    case PME_FREE_MXD_POOL:
        bug("[MM] Attempt to free %u bytes at 0x%p\n", memSize, memory);
        break;
    case PME_ALLOC_INV_POOL:
        bug("[MM] Attempt to allocate %u bytes\n", memSize);
        break;
    case PME_DEL_POOL_INV_POOL:
        bug("[MM] Attempt to free pool 0x%p which is not marked as valid\n", poolHeader);
        break;
    default:
        break;
    }

    switch (code)
    {
    case PME_FREE_NO_CHUNK:
        bug("[MM] The chunk does not belong to a pool\n");
        break;
    case PME_FREE_INV_POOL:
        bug("[MM] The chunk belongs to pool 0x%p which is not marked as valid\n", poolHeaderMH);
        break;
    case PME_FREE_MXD_POOL:
        bug("[MM] The chunk belongs to pool 0x%p, but the call indicated pool 0x%p\n", poolHeaderMH, poolHeader);
        break;
    case PME_ALLOC_INV_POOL:
        bug("[MM] Requested to allocate from pool 0x%p which is not marked as valid\n", poolHeader);
        break;
    default:
        break;
    }

    Alert(AN_BadFreeAddr | flags);
}