/*
    Copyright © 1995-2012, The AROS Development Team. All rights reserved.
*/
#include <aros/debug.h>
#include <exec/rawfmt.h>
#include <exec/memheaderext.h>
#include <proto/kernel.h>

#include "exec_intern.h"
#include "exec_util.h"
/*
 * Find the MemHeader to which the address belongs.
 * This function is legal to be called in supervisor mode (we use TypeOfMem()
 * in order to validate addresses in tons of places). So, here are checks.
 */
struct MemHeader *FindMem(APTR address, struct ExecBase *SysBase)
{
    struct MemHeader *mh;
    int usermode = (KernelBase != NULL) && (KrnIsSuper() == 0);
    /* Nobody should change the memory list now. */
    if (usermode) MEM_LOCK_SHARED;

    /* Follow the list of MemHeaders */
    mh = (struct MemHeader *)SysBase->MemList.lh_Head;
    while (mh->mh_Node.ln_Succ != NULL)
    {
        if (IsManagedMem(mh))
        {
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

            if (mhe->mhe_InBounds)
            {
                if (mhe->mhe_InBounds(mhe, address, address))
                {
                    if (usermode) MEM_UNLOCK;
                    return mh;
                }
            }
        }
        else
        {
            /* Check if this MemHeader fits */
            if (address >= mh->mh_Lower && address < mh->mh_Upper)
            {
                /* Yes. Return it. */
                if (usermode) MEM_UNLOCK;
                return mh;
            }
        }
        /* Go to next MemHeader */
        mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
    }

    if (usermode) MEM_UNLOCK;
    return NULL;
}
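/*
 * Usage sketch (added for illustration, not part of the original file):
 * TypeOfMem()-style address validation boils down to a FindMem() lookup.
 * The names below are hypothetical:
 *
 *     struct MemHeader *hdr = FindMem(addr, SysBase);
 *     ULONG attrs = hdr ? hdr->mh_Attributes : 0;   // 0: address unknown
 */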
char *FormatMMContext(char *buffer, struct MMContext *ctx, struct ExecBase *SysBase)
{
    if (ctx->addr)
        buffer = NewRawDoFmt("In %s, block at 0x%p, size %lu", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->func, ctx->addr, ctx->size) - 1;
    else
        buffer = NewRawDoFmt("In %s, size %lu", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->func, ctx->size) - 1;
    if (ctx->mc)
    {
        buffer = NewRawDoFmt("\nCorrupted MemChunk 0x%p (next 0x%p, size %lu)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mc, ctx->mc->mc_Next, ctx->mc->mc_Bytes) - 1;

        if (ctx->mcPrev)
            buffer = NewRawDoFmt("\nPrevious MemChunk 0x%p (next 0x%p, size %lu)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mcPrev, ctx->mcPrev->mc_Next, ctx->mcPrev->mc_Bytes) - 1;
    }
    /* Print MemHeader details */
    buffer = NewRawDoFmt("\nMemHeader 0x%p (0x%p - 0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh, ctx->mh->mh_Lower, ctx->mh->mh_Upper) - 1;
    if ((IPTR)ctx->mh->mh_First & (MEMCHUNK_TOTAL - 1))
        buffer = NewRawDoFmt("\n- Unaligned first chunk address (0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_First) - 1;

    if (ctx->mh->mh_Free & (MEMCHUNK_TOTAL - 1))
        buffer = NewRawDoFmt("\n- Unaligned free space count (0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_Free) - 1;
    if (ctx->mh->mh_First)
    {
        if ((APTR)ctx->mh->mh_First < ctx->mh->mh_Lower)
            buffer = NewRawDoFmt("\n- First chunk (0x%p) below lower address", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_First) - 1;

        if ((APTR)ctx->mh->mh_First + ctx->mh->mh_Free > ctx->mh->mh_Upper)
            buffer = NewRawDoFmt("\n- Free space count too large (%lu, first chunk 0x%p)", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->mh->mh_Free, ctx->mh->mh_First) - 1;
    }

    return buffer;
}
/* #define NO_ALLOCATOR_CONTEXT */

#ifdef NO_ALLOCATOR_CONTEXT

struct MemHeaderAllocatorCtx *mhac_GetSysCtx(struct MemHeader *mh, struct ExecBase *SysBase)
{
    return NULL;
}

void mhac_PoolMemHeaderSetup(struct MemHeader *mh, struct ProtectedPool *pool)
{
    mh->mh_Node.ln_Name = (STRPTR)pool;
}

ULONG mhac_GetCtxSize()
{
    return 0;
}

void mhac_ClearSysCtx(struct MemHeader *mh, struct ExecBase *SysBase)
{
}

#define mhac_MemChunkClaimed(a, b)
#define mhac_IsIndexEmpty(a) (TRUE)
#define mhac_ClearIndex(a)
#define mhac_MemChunkCreated(a, b, c) { (void)b; }
#define mhac_GetBetterPrevMemChunk(a, b, c) (a)
#define mhac_GetCloserPrevMemChunk(a, b, c) (a)
#define mhac_PoolMemHeaderGetCtx(a) (NULL)
#define mhac_PoolMemHeaderGetPool(a) (a->mh_Node.ln_Name)

#else
/* Allocator optimization support */

/*
 * The array contains pointers to the chunk previous to the first chunk of at
 * least size N, where N = 1 << (FIRSTPOTBIT + (i * POTSTEP)) and i is the
 * index in the array. "First" is defined as the MemChunk with the lowest
 * address.
 *
 * Each chunk in the array locates the place where the search should start,
 * not necessarily where the allocation should happen.
 *
 * If a chunk is taken from the MemHeader and is present in the index, it
 * must be removed from the index.
 * If a chunk is returned to the MemHeader, it may be registered with the
 * index.
 */

#define FIRSTPOTBIT (5)
#define FIRSTPOT (1 << FIRSTPOTBIT)
#define POTSTEP (1) /* Distance between each level */
#define ALLOCATORCTXINDEXSIZE (10) /* Number of levels in index */
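/*
 * Worked example (added for illustration): with the values above the index
 * levels cover chunk sizes 32, 64, 128, ..., 16384 bytes. Level i holds a
 * pointer to the chunk previous to the lowest-addressed free chunk of at
 * least 1 << (FIRSTPOTBIT + i * POTSTEP) bytes, so a search can start deep
 * inside the free list instead of at mh_First.
 */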
struct MemHeaderAllocatorCtx
{
    struct Node         mhac_Node;
    struct MemHeader    *mhac_MemHeader;
    APTR                mhac_Data1;

    ULONG               mhac_IndexSize;
    struct MemChunk     *mhac_PrevChunks[ALLOCATORCTXINDEXSIZE];
};
ULONG mhac_GetCtxSize()
{
    return (AROS_ROUNDUP2(sizeof(struct MemHeaderAllocatorCtx), MEMCHUNK_TOTAL));
}
static BOOL mhac_IsIndexEmpty(struct MemHeaderAllocatorCtx *mhac)
{
    ULONG i;

    for (i = 0; i < mhac->mhac_IndexSize; i++)
        if (mhac->mhac_PrevChunks[i] != NULL)
            return FALSE;

    return TRUE;
}
static void mhac_ClearIndex(struct MemHeaderAllocatorCtx *mhac)
{
    LONG i;

    for (i = 0; i < ALLOCATORCTXINDEXSIZE; i++)
        mhac->mhac_PrevChunks[i] = NULL;
}
static void mhac_SetupMemHeaderAllocatorCtx(struct MemHeader *mh, ULONG maxindexsize,
    struct MemHeaderAllocatorCtx *mhac)
{
    /* Adjust index size to space in MemHeader */
    IPTR size = (IPTR)mh->mh_Upper - (IPTR)mh->mh_Lower;
    LONG indexsize = 0;

    size = size >> FIRSTPOTBIT;
    size = size >> POTSTEP;

    for (; size > 0; size = size >> POTSTEP) indexsize++;

    if (indexsize < 0) indexsize = 0;
    if (indexsize > maxindexsize) indexsize = maxindexsize;
    if (indexsize > ALLOCATORCTXINDEXSIZE) indexsize = ALLOCATORCTXINDEXSIZE;

    mhac->mhac_MemHeader = mh;
    mhac->mhac_IndexSize = indexsize;
    mhac_ClearIndex(mhac);
}
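/*
 * Worked example (added for illustration): for a 1 MB MemHeader,
 * size = 2^20 >> FIRSTPOTBIT >> POTSTEP = 2^14, so the loop above counts
 * 15 shifts and indexsize is then clamped to maxindexsize (at most
 * ALLOCATORCTXINDEXSIZE). Small headers simply get fewer index levels.
 */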
void mhac_ClearSysCtx(struct MemHeader *mh, struct ExecBase *SysBase)
{
    struct MemHeaderAllocatorCtx *mhac = NULL;

    ForeachNode(&PrivExecBase(SysBase)->AllocatorCtxList, mhac)
    {
        if (mhac->mhac_MemHeader == mh)
            mhac_ClearIndex(mhac);
    }
}
struct MemHeaderAllocatorCtx *mhac_GetSysCtx(struct MemHeader *mh, struct ExecBase *SysBase)
{
    struct MemHeaderAllocatorCtx *mhac = NULL;

    ForeachNode(&PrivExecBase(SysBase)->AllocatorCtxList, mhac)
    {
        if (mhac->mhac_MemHeader == mh)
            return mhac; /* Found an existing context */
    }

    /* New context is needed */
    mhac = Allocate(mh, sizeof(struct MemHeaderAllocatorCtx));
    mhac_SetupMemHeaderAllocatorCtx(mh, ALLOCATORCTXINDEXSIZE, mhac);
    AddTail(&PrivExecBase(SysBase)->AllocatorCtxList, (struct Node *)mhac);

    return mhac;
}
static void mhac_MemChunkClaimed(struct MemChunk *mc, struct MemHeaderAllocatorCtx *mhac)
{
    LONG i;

    for (i = 0; i < mhac->mhac_IndexSize; i++)
    {
        if (mhac->mhac_PrevChunks[i] != NULL &&
            (mhac->mhac_PrevChunks[i] == mc || mhac->mhac_PrevChunks[i]->mc_Next == mc))
        {
            mhac->mhac_PrevChunks[i] = NULL;
        }
    }
}
static LONG mhac_CalcIndex(LONG size, ULONG indexsize)
{
    LONG r = 0;

    size >>= FIRSTPOTBIT;
    while (size >>= POTSTEP) r++;

    if (r > indexsize - 1) r = indexsize - 1;

    return r;
}
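/*
 * Worked example (added for illustration, assuming FIRSTPOTBIT = 5 and
 * POTSTEP = 1): a request of 100 bytes gives 100 >> 5 = 3, which shifts to
 * zero after one step, so r = 1. The search may start at index level 1
 * (chunks of at least 64 bytes) without skipping any chunk that could
 * satisfy the request, since everything before that point is under 64 bytes.
 */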
static void mhac_MemChunkCreated(struct MemChunk *mc, struct MemChunk *mcprev, struct MemHeaderAllocatorCtx *mhac)
{
    LONG i, v = FIRSTPOT;

    if (mc->mc_Bytes < FIRSTPOT) /* Allocation too small for index */
        return;

    for (i = 0; i < mhac->mhac_IndexSize; i++, v = v << POTSTEP)
    {
        if (mc->mc_Bytes < v)
            break; /* Chunk smaller than index at i. Stop */

        /* If there is no chunk in the index, or the passed chunk has a lower address than the indexed one */
        if (mhac->mhac_PrevChunks[i] == NULL ||
            (mhac->mhac_PrevChunks[i] != NULL && mhac->mhac_PrevChunks[i]->mc_Next > mc))
        {
            mhac->mhac_PrevChunks[i] = mcprev;
        }
    }
}
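/*
 * Worked example (added for illustration): a freed chunk of 100 bytes is
 * registered at levels 0 (>= 32) and 1 (>= 64), and the loop stops at
 * level 2 (>= 128). In each case the entry is only updated if the chunk
 * sits at a lower address than the chunk currently indexed there.
 */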
/*
 * Returns a pointer to the chunk previous to a chunk that allows faster
 * location of a chunk big enough for the allocation. The function never
 * returns NULL.
 * Current implementation:
 * Returns a pointer to the chunk previous to the first (lowest-address)
 * chunk of the largest indexed size class not bigger than the requested size.
 */
static struct MemChunk *mhac_GetBetterPrevMemChunk(struct MemChunk *prev, IPTR size, struct MemHeaderAllocatorCtx *mhac)
{
    struct MemChunk *_return = prev;

    if (size < FIRSTPOT)
        return _return; /* Allocation too small for index */

    if (mhac)
    {
        LONG i;
        LONG ii = mhac_CalcIndex(size, mhac->mhac_IndexSize);

        if (mhac->mhac_PrevChunks[ii] != NULL)
            _return = mhac->mhac_PrevChunks[ii];
        else
        {
            /* Fall back to the closest lower level that has an entry */
            for (i = ii - 1; i >= 0; i--)
            {
                if (mhac->mhac_PrevChunks[i] != NULL)
                {
                    _return = mhac->mhac_PrevChunks[i];
                    break;
                }
            }
        }
    }

    return _return;
}
static struct MemChunk *mhac_GetCloserPrevMemChunk(struct MemChunk *prev, APTR addr, struct MemHeaderAllocatorCtx *mhac)
{
    struct MemChunk *_return = prev;

    if (mhac)
    {
        LONG i;

        for (i = 0; i < mhac->mhac_IndexSize; i++)
        {
            if (mhac->mhac_PrevChunks[i] != NULL &&
                (APTR)mhac->mhac_PrevChunks[i]->mc_Next < addr &&
                mhac->mhac_PrevChunks[i]->mc_Next > _return->mc_Next)
            {
                _return = mhac->mhac_PrevChunks[i];
            }
        }
    }

    return _return;
}
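/*
 * Note (added for illustration): this is the deallocation-side counterpart
 * of mhac_GetBetterPrevMemChunk(). Instead of matching a size class, it
 * scans all index levels for the indexed chunk whose successor lies closest
 * below the address being freed, shortening the list walk in stdDealloc().
 */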
/*
 * Enhance a MemHeader that is part of a pool with a MemHeaderAllocatorCtx
 */
void mhac_PoolMemHeaderSetup(struct MemHeader *mh, struct ProtectedPool *pool)
{
    struct MemHeaderAllocatorCtx *mhac = Allocate(mh, sizeof(struct MemHeaderAllocatorCtx));

    mhac_SetupMemHeaderAllocatorCtx(mh, 5, mhac);

    mhac->mhac_Data1 = pool;
    mh->mh_Node.ln_Name = (STRPTR)mhac;
}

#define mhac_PoolMemHeaderGetCtx(a) ((struct MemHeaderAllocatorCtx *)(a->mh_Node.ln_Name))
#define mhac_PoolMemHeaderGetPool(a) (mhac_PoolMemHeaderGetCtx(a)->mhac_Data1)

#endif
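/*
 * Note (added for illustration): a pool puddle reuses mh_Node.ln_Name as a
 * back pointer. With the allocator context enabled, ln_Name points at the
 * MemHeaderAllocatorCtx, which in turn stores the owning pool in mhac_Data1;
 * with NO_ALLOCATOR_CONTEXT, ln_Name points directly at the pool (see the
 * macro versions above).
 */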
#ifdef NO_CONSISTENCY_CHECKS

#define validateHeader(mh, op, addr, size, SysBase) TRUE
#define validateChunk(mc, prev, mh, op, addr, size, SysBase) TRUE

#else

static ULONG memAlerts[] =
{
    AT_DeadEnd|AN_MemoryInsane, /* MM_ALLOC   */
    AT_DeadEnd|AN_MemCorrupt,   /* MM_FREE    */
    AN_FreeTwice                /* MM_OVERLAP */
};
/*
 * MemHeader validation routine. Rules are:
 *
 * 1. Both mh_First and mh_Free must be MEMCHUNK_TOTAL-aligned.
 * 2. Free space (if present) must completely fit between mh_Lower and mh_Upper.
 * We intentionally don't check the header's own location. We assume that in the
 * future we'll be able to put MEMF_CHIP headers inside MEMF_FAST memory, for speedup.
 */
static BOOL validateHeader(struct MemHeader *mh, UBYTE op, APTR addr, IPTR size, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    if (((IPTR)mh->mh_First & (MEMCHUNK_TOTAL - 1)) || (mh->mh_Free & (MEMCHUNK_TOTAL - 1)) ||        /* 1 */
        (mh->mh_First &&
         (((APTR)mh->mh_First < mh->mh_Lower) || ((APTR)mh->mh_First + mh->mh_Free > mh->mh_Upper)))) /* 2 */
    {
        if (tp)
        {
            /* TraceLocation is not supplied by PrepareExecBase(). Fail silently. */
            struct MMContext alertData;

            alertData.mh     = mh;
            alertData.mc     = NULL;
            alertData.mcPrev = NULL;
            alertData.func   = tp->function;
            alertData.addr   = addr;
            alertData.size   = size;
            alertData.op     = op;

            Exec_ExtAlert(memAlerts[op], tp->caller, tp->stack, AT_MEMORY, &alertData, SysBase);
        }

        /*
         * Theoretically during very early boot we can fail to post an alert (no KernelBase yet).
         * In this case we return with fault indication.
         */
        return FALSE;
    }

    return TRUE;
}
/*
 * MemChunk consistency check. Rules are:
 *
 * 1. Both mc_Next and mc_Bytes must be MEMCHUNK_TOTAL-aligned, and mc_Bytes cannot be zero.
 * 2. The end of this chunk must not be greater than mh->mh_Upper.
 * 3. mc_Next (if present) must point in between the end of this chunk and mh->mh_Upper - MEMCHUNK_TOTAL.
 *    There must be at least MEMCHUNK_TOTAL allocated bytes between free chunks.
 *
 * This function is inlined for speed improvements.
 */
static inline BOOL validateChunk(struct MemChunk *p2, struct MemChunk *p1, struct MemHeader *mh,
                                 UBYTE op, APTR addr, IPTR size,
                                 struct TraceLocation *tp, struct ExecBase *SysBase)
{
    if (((IPTR)p2->mc_Next & (MEMCHUNK_TOTAL-1)) || (p2->mc_Bytes == 0) || (p2->mc_Bytes & (MEMCHUNK_TOTAL-1)) || /* 1 */
        ((APTR)p2 + p2->mc_Bytes > mh->mh_Upper) ||                                                               /* 2 */
        (p2->mc_Next && (((APTR)p2->mc_Next < (APTR)p2 + p2->mc_Bytes + MEMCHUNK_TOTAL) ||                        /* 3 */
                         ((APTR)p2->mc_Next > mh->mh_Upper - MEMCHUNK_TOTAL))))
    {
        if (tp)
        {
            struct MMContext alertData;

            alertData.mh     = mh;
            alertData.mc     = p2;
            alertData.mcPrev = (p1 == (struct MemChunk *)&mh->mh_First) ? NULL : p1;
            alertData.func   = tp->function;
            alertData.addr   = addr;
            alertData.size   = size;
            alertData.op     = op;

            Exec_ExtAlert(memAlerts[op], tp->caller, tp->stack, AT_MEMORY, &alertData, SysBase);
        }

        return FALSE;
    }

    return TRUE;
}

#endif
/*
 * Allocate a block from the given MemHeader in a specific way.
 * This routine can be called with SysBase = NULL.
 *
 * MemHeaderAllocatorCtx:
 * This parameter is optional; allocation needs to work without it as well.
 * However, if it was passed once for a given MemHeader, it needs to be passed
 * in all consecutive calls.
 */
APTR stdAlloc(struct MemHeader *mh, struct MemHeaderAllocatorCtx *mhac, IPTR size,
    ULONG requirements, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    /*
     * The check has to be done for the second time. Exec uses stdAlloc on a MemHeader
     * passed upon startup. This is bad, very bad. So here is a temporary hack :)
     */
    if ((mh->mh_Node.ln_Type == NT_MEMORY) && IsManagedMem(mh))
    {
        struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

        if (mhe->mhe_Alloc)
            return mhe->mhe_Alloc(mhe, size, &requirements);

        return NULL;
    }
    else
    {
        /* First round byteSize up to a multiple of MEMCHUNK_TOTAL */
        IPTR byteSize = AROS_ROUNDUP2(size, MEMCHUNK_TOTAL);
        struct MemChunk *mc = NULL, *p1, *p2;

        /* Validate MemHeader before doing anything. */
        if (!validateHeader(mh, MM_ALLOC, NULL, size, tp, SysBase))
            return NULL;

        /* Validate if there is even enough total free memory */
        if (mh->mh_Free < byteSize)
            return NULL;
        /*
         * The free memory list is only single linked, i.e. to remove
         * elements from the list I need the node's predecessor. For the
         * first element I can use mh->mh_First instead of a real predecessor.
         */
        p1 = mhac_GetBetterPrevMemChunk((struct MemChunk *)&mh->mh_First, size, mhac);
        p2 = p1->mc_Next;

        /*
         * Follow the memory list. p1 is the previous MemChunk, p2 is the current one.
         * On the 1st pass p1 points to mh->mh_First, so that changing p1->mc_Next actually
         * changes mh->mh_First.
         */
        while (p2 != NULL)
        {
            /* Validate the current chunk */
            if (!validateChunk(p2, p1, mh, MM_ALLOC, NULL, size, tp, SysBase))
                return NULL;

            /* Check if the current block is large enough */
            if (p2->mc_Bytes >= byteSize)
            {
                /* It is. Remember its predecessor. */
                mc = p1;

                /* Use this one if MEMF_REVERSE is not set.*/
                if (!(requirements & MEMF_REVERSE))
                    break;
                /* Else continue - there may be more to come. */
            }

            /* Go to next block */
            p1 = p2;
            p2 = p1->mc_Next;
        }
        /* Something found? */
        if (mc != NULL)
        {
            /* Remember: if MEMF_REVERSE is set p1 and p2 are now invalid. */
            p1 = mc;
            p2 = p1->mc_Next;

            mhac_MemChunkClaimed(p2, mhac);

            /* Remove the block from the list and return it. */
            if (p2->mc_Bytes == byteSize)
            {
                /* Fits exactly. Just relink the list. */
                p1->mc_Next = p2->mc_Next;
                mc          = p2;
            }
            else
            {
                struct MemChunk *pp = p1;

                if (requirements & MEMF_REVERSE)
                {
                    /* Return the last bytes. */
                    p1->mc_Next = p2;
                    mc = (struct MemChunk *)((UBYTE *)p2 + p2->mc_Bytes - byteSize);
                }
                else
                {
                    /* Return the first bytes. */
                    p1->mc_Next = (struct MemChunk *)((UBYTE *)p2 + byteSize);
                    mc = p2;
                }

                p1           = p1->mc_Next;
                p1->mc_Next  = p2->mc_Next;
                p1->mc_Bytes = p2->mc_Bytes - byteSize;

                mhac_MemChunkCreated(p1, pp, mhac);
            }

            mh->mh_Free -= byteSize;

            /* Clear the block if requested */
            if (requirements & MEMF_CLEAR)
                memset(mc, 0, byteSize);
        }
        else
        {
            if (!mhac_IsIndexEmpty(mhac))
            {
                /*
                 * Since chunks created during deallocation are not returned to the index,
                 * retry with a cleared index.
                 */
                mhac_ClearIndex(mhac);
                mc = stdAlloc(mh, mhac, size, requirements, tp, SysBase);
            }
        }

        return mc;
    }
}
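/*
 * Usage sketch (added for illustration; 'ctx' and 'block' are hypothetical
 * names): allocating 256 cleared bytes from a known system MemHeader with
 * its allocator context. tp may be NULL, as during very early startup:
 *
 *     struct MemHeaderAllocatorCtx *ctx = mhac_GetSysCtx(mh, SysBase);
 *     APTR block = stdAlloc(mh, ctx, 256, MEMF_CLEAR, NULL, SysBase);
 */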
/*
 * Free 'byteSize' bytes starting at 'memoryBlock' belonging to MemHeader 'freeList'.
 * The MemHeaderAllocatorCtx parameter follows the same rules as in stdAlloc().
 */
void stdDealloc(struct MemHeader *freeList, struct MemHeaderAllocatorCtx *mhac, APTR addr, IPTR size, struct TraceLocation *tp, struct ExecBase *SysBase)
{
    APTR memoryBlock;
    IPTR byteSize;
    struct MemChunk *p1, *p2, *p3;
    UBYTE *p4;

    if ((freeList->mh_Node.ln_Type == NT_MEMORY) && IsManagedMem(freeList))
    {
        struct MemHeaderExt *mhe = (struct MemHeaderExt *)freeList;

        if (mhe->mhe_Free)
            mhe->mhe_Free(mhe, addr, size);

        return;
    }

    /* Make sure the MemHeader is OK */
    if (!validateHeader(freeList, MM_FREE, addr, size, tp, SysBase))
        return;

    /* Align size to the requirements */
    byteSize = size + ((IPTR)addr & (MEMCHUNK_TOTAL - 1));
    byteSize = (byteSize + MEMCHUNK_TOTAL - 1) & ~(MEMCHUNK_TOTAL - 1);

    /* Align the block as well */
    memoryBlock = (APTR)((IPTR)addr & ~(MEMCHUNK_TOTAL - 1));
    /*
        The free memory list is only single linked, i.e. to insert
        elements into the list I need the node as well as its
        predecessor. For the first element I can use freeList->mh_First
        instead of a real predecessor.
    */
    p1 = (struct MemChunk *)&freeList->mh_First;
    p2 = freeList->mh_First;

    /* Start and end(+1) of the block */
    p3 = (struct MemChunk *)memoryBlock;
    p4 = (UBYTE *)p3 + byteSize;
    /* No chunk in list? Just insert the current one and return. */
    if (p2 == NULL)
    {
        p3->mc_Bytes = byteSize;
        p3->mc_Next = NULL;
        p1->mc_Next = p3;
        freeList->mh_Free += byteSize;
        return;
    }
    /* Find closer chunk */
    p1 = mhac_GetCloserPrevMemChunk(p1, addr, mhac);
    p2 = p1->mc_Next;

    /* Follow the list to find a place where to insert our memory. */
    do
    {
        if (!validateChunk(p2, p1, freeList, MM_FREE, addr, size, tp, SysBase))
            return;

        /* Found a block with a higher address? */
        if (p2 >= p3)
        {
#if !defined(NO_CONSISTENCY_CHECKS)
            /*
                If the memory to be freed overlaps with the current
                block something must be wrong.
            */
            if (p4 > (UBYTE *)p2)
            {
                bug("[MM] Chunk allocator error\n");
                bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
                bug("[MM] Block overlaps (1) with chunk 0x%p (%u bytes)\n", p2, p2->mc_Bytes);

                Alert(AN_FreeTwice);
                return;
            }
#endif
            /* End the loop with p2 non-zero */
            break;
        }

        /* goto next block */
        p1 = p2;
        p2 = p2->mc_Next;

        /* If the loop ends with p2 zero add it at the end. */
    } while (p2 != NULL);
    /* If there was a previous block merge with it. */
    if (p1 != (struct MemChunk *)&freeList->mh_First)
    {
#if !defined(NO_CONSISTENCY_CHECKS)
        /* Check if they overlap. */
        if ((UBYTE *)p1 + p1->mc_Bytes > (UBYTE *)p3)
        {
            bug("[MM] Chunk allocator error\n");
            bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
            bug("[MM] Block overlaps (2) with chunk 0x%p (%u bytes)\n", p1, p1->mc_Bytes);

            Alert(AN_FreeTwice);
            return;
        }
#endif
        /* Merge if possible */
        if ((UBYTE *)p1 + p1->mc_Bytes == (UBYTE *)p3)
        {
            mhac_MemChunkClaimed(p1, mhac);
            p3 = p1;
            /*
             * Note: this case does not lead to mhac_MemChunkCreated, because
             * we don't have the chunk previous to p1.
             */
        }
        else
            /* Not possible to merge */
            p1->mc_Next = p3;
    }
    else
        /*
            There was no previous block. Just insert the memory at
            the start of the list.
        */
        p1->mc_Next = p3;
    /* Try to merge with next block (if there is one ;-) ). */
    if (p4 == (UBYTE *)p2 && p2 != NULL)
    {
        /*
           Overlap checking already done. Doing it here after
           the list potentially changed would be a bad idea.
        */
        mhac_MemChunkClaimed(p2, mhac);
        p4 += p2->mc_Bytes;
        p2 = p2->mc_Next;
    }

    /* relink the list and return. */
    p3->mc_Next = p2;
    p3->mc_Bytes = p4 - (UBYTE *)p3;
    freeList->mh_Free += byteSize;
    if (p1->mc_Next == p3) mhac_MemChunkCreated(p3, p1, mhac);
}
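/*
 * Usage sketch (added for illustration, continuing the stdAlloc() sketch
 * above with the hypothetical 'ctx' and 'block'): stdDealloc() is the exact
 * pair of stdAlloc() and must receive the same MemHeader and context:
 *
 *     stdDealloc(mh, ctx, block, 256, NULL, SysBase);
 */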
/*
 * During the transition period the four routines below use the nommu allocator.
 * When the transition is complete they should use it only if the MMU
 * is inactive. Otherwise they should use KrnAllocPages()/KrnFreePages().
 */

/* Non-mungwalled AllocAbs(). Does not destroy sideways regions. */
APTR InternalAllocAbs(APTR location, IPTR byteSize, struct ExecBase *SysBase)
{
    return nommu_AllocAbs(location, byteSize, SysBase);
}
/*
 * Use this if you want to free a region allocated by InternalAllocAbs().
 * Otherwise you hit the mungwall problem (FreeMem() expects a header).
 */
void InternalFreeMem(APTR location, IPTR byteSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    nommu_FreeMem(location, byteSize, loc, SysBase);
}
/*
 * Allocate a region managed by its own header. Usable size is reduced by the
 * size of the header.
 */
APTR AllocMemHeader(IPTR size, ULONG flags, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeader *mh;

    mh = nommu_AllocMem(size, flags, loc, SysBase);
    DMH(bug("[AllocMemHeader] Allocated %u bytes at 0x%p\n", size, mh));

    if (mh)
    {
        struct MemHeader *orig = FindMem(mh, SysBase);

        if (IsManagedMem(orig))
        {
            struct MemHeaderExt *mhe_orig = (struct MemHeaderExt *)orig;
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;
            IPTR header_size = (sizeof(struct MemHeaderExt) + 15) & ~15;

            /* Copy the basic information */
            mh->mh_Node.ln_Type  = NT_MEMORY;
            mh->mh_Node.ln_Pri   = orig->mh_Node.ln_Pri;
            mh->mh_Attributes    = orig->mh_Attributes;
            mh->mh_Upper         = (void *)mh + size;
            mh->mh_Lower         = (void *)mh;

            mhe->mhe_Magic       = mhe_orig->mhe_Magic;

            /* Copy init functions */
            mhe->mhe_InitPool    = mhe_orig->mhe_InitPool;
            mhe->mhe_DestroyPool = mhe_orig->mhe_DestroyPool;

            /* Copy memory allocation functions */
            mhe->mhe_Alloc       = mhe_orig->mhe_Alloc;
            mhe->mhe_AllocAbs    = mhe_orig->mhe_AllocAbs;
            mhe->mhe_AllocVec    = mhe_orig->mhe_AllocVec;
            mhe->mhe_Avail       = mhe_orig->mhe_Avail;
            mhe->mhe_Free        = mhe_orig->mhe_Free;
            mhe->mhe_FreeVec     = mhe_orig->mhe_FreeVec;
            mhe->mhe_InBounds    = mhe_orig->mhe_InBounds;
            mhe->mhe_ReAlloc     = mhe_orig->mhe_ReAlloc;

            /*
             * User data will be initialized. The memory pool gets the first
             * region (right after the header).
             */
            mhe->mhe_UserData    = (APTR)mh + header_size;

            /* Initialize the pool with the remaining size */
            if (mhe->mhe_InitPool)
                mhe->mhe_InitPool(mhe, size, size - header_size);
        }
        else
        {
            size -= MEMHEADER_TOTAL;

            /*
             * Initialize new MemHeader.
             * Inherit attributes from the system MemHeader from which
             * our chunk was allocated.
             */
            mh->mh_Node.ln_Type = NT_MEMORY;
            mh->mh_Node.ln_Pri  = orig->mh_Node.ln_Pri;
            mh->mh_Attributes   = orig->mh_Attributes;
            mh->mh_Lower        = (APTR)mh + MEMHEADER_TOTAL;
            mh->mh_Upper        = mh->mh_Lower + size;
            mh->mh_First        = mh->mh_Lower;
            mh->mh_Free         = size;

            /* Create the first (and the only) MemChunk */
            mh->mh_First->mc_Next  = NULL;
            mh->mh_First->mc_Bytes = size;
        }
    }

    return mh;
}
/* Free a region allocated by AllocMemHeader() */
void FreeMemHeader(APTR addr, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeaderExt *mhe = (struct MemHeaderExt *)addr;

    IPTR size = (IPTR)mhe->mhe_MemHeader.mh_Upper - (IPTR)addr;

    if (IsManagedMem(mhe))
    {
        if (mhe->mhe_DestroyPool)
            mhe->mhe_DestroyPool(mhe);
    }

    DMH(bug("[FreeMemHeader] Freeing %u bytes at 0x%p\n", size, addr));
    nommu_FreeMem(addr, size, loc, SysBase);
}
/*
 * This is our own Enqueue() version. Currently the only difference is that
 * we insert our node before the first node with LOWER OR EQUAL priority,
 * so that for nodes with equal priority it will be a LIFO, not a FIFO, queue.
 * This speeds up the allocator.
 * TODO: implement secondary sorting by mh_Free. This will allow us to
 * implement a best-match algorithm (so that puddles with smaller free space
 * will be picked up first). This way the smallest allocations will reuse the
 * smallest chunks instead of fragmenting large ones.
 */
static void EnqueueMemHeader(struct MinList *list, struct MemHeader *mh)
{
    struct MemHeader *next;

    /* Look through the list */
    ForeachNode (list, next)
    {
        /*
            Look for the first MemHeader with a lower or equal pri as the node
            we have to insert into the list.
        */
        if (mh->mh_Node.ln_Pri >= next->mh_Node.ln_Pri)
            break;
    }

    /* Insert the node before next */
    mh->mh_Node.ln_Pred = next->mh_Node.ln_Pred;
    mh->mh_Node.ln_Succ = &next->mh_Node;
    next->mh_Node.ln_Pred->ln_Succ = &mh->mh_Node;
    next->mh_Node.ln_Pred = &mh->mh_Node;
}
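/*
 * Worked example (added for illustration): inserting a pri-0 MemHeader into
 * a list with priorities (5, 0, -5) places it before the existing pri-0
 * node, giving (5, new 0, old 0, -5) - LIFO among equal priorities, which
 * keeps recently used puddles near the front of the list.
 */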
/*
 * Allocate memory with given physical properties from the given pool.
 * Our pools can be mixed. This means that different puddles from the
 * pool can have different physical flags. For example the same pool
 * can contain puddles from both CHIP and FAST memory. This is done in
 * order to provide a single system default pool for all types of memory.
 */
APTR InternalAllocPooled(APTR poolHeader, IPTR memSize, ULONG flags, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct ProtectedPool *pool = poolHeader + MEMHEADER_TOTAL;
    APTR ret = NULL;
    IPTR origSize;
    struct MemHeader *mh;

    D(bug("[exec] InternalAllocPooled(0x%p, %u, 0x%08X), header 0x%p\n", poolHeader, memSize, flags, pool));

    /*
     * Memory blocks allocated from the pool store pointers to the MemHeader they were
     * allocated from. This is done in order to avoid slow lookups in InternalFreePooled().
     * This is done in an AllocVec()-alike manner; the pointer is placed right before the block.
     */
    memSize += sizeof(struct MemHeader *);
    origSize = memSize;

    /* If mungwall is enabled, count also the size of the walls */
    if (PrivExecBase(SysBase)->IntFlags & EXECF_MungWall)
        memSize += MUNGWALL_TOTAL_SIZE;

    if (pool->pool.PoolMagic != POOL_MAGIC)
    {
        PoolManagerAlert(PME_ALLOC_INV_POOL, AT_DeadEnd, memSize, NULL, NULL, poolHeader);
        return NULL;
    }

    if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
    {
        ObtainSemaphore(&pool->sem);
    }
    /* Follow the list of MemHeaders */
    mh = (struct MemHeader *)pool->pool.PuddleList.mlh_Head;

    for (;;)
    {
        ULONG physFlags = flags & MEMF_PHYSICAL_MASK;
        /* Are there no more MemHeaders? */
        if (mh->mh_Node.ln_Succ == NULL)
        {
            /*
             * Usually we allocate puddles of default size, specified during
             * pool creation. However we can be asked to allocate a block whose
             * size will be larger than the default puddle size.
             * Previously this was handled by the threshSize parameter. In our new
             * implementation we just allocate an enlarged puddle. This is done
             * in order not to waste page tails beyond the allocated large block.
             * These tails will be used for our pool too. Their size is smaller
             * than the page size but they still perfectly fit for small allocations
             * (the primary use for pools).
             * Since our large block is also a puddle, it will be reused for our
             * pool when the block is freed. It can also be reused for another
             * large allocation, if it fits in.
             * Our final puddle size still includes MEMHEADER_TOTAL plus the
             * allocator ctx size in any case.
             */
            IPTR puddleSize = pool->pool.PuddleSize;

            if (memSize > puddleSize - (MEMHEADER_TOTAL + mhac_GetCtxSize()))
            {
                IPTR align = PrivExecBase(SysBase)->PageSize - 1;

                puddleSize = memSize + MEMHEADER_TOTAL + mhac_GetCtxSize();
                /* Align the size up to page boundary */
                puddleSize = (puddleSize + align) & ~align;
            }

            mh = AllocMemHeader(puddleSize, flags, loc, SysBase);
            D(bug("[InternalAllocPooled] Allocated new puddle 0x%p, size %u\n", mh, puddleSize));

            /* No memory left? */
            if (mh == NULL)
                break;

            /* Add the new puddle to our pool */
            mhac_PoolMemHeaderSetup(mh, pool);
            Enqueue((struct List *)&pool->pool.PuddleList, &mh->mh_Node);

            /* Fall through to get the memory */
        }
        else
        {
            /* Ignore existing MemHeaders with free memory smaller than the allocation */
            if (mh->mh_Free < memSize)
            {
                mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
                continue;
            }

            /* Ignore existing MemHeaders with a memory type that differs from the requested one */
            if (physFlags & ~mh->mh_Attributes)
            {
                D(bug("[InternalAllocPooled] Wrong flags for puddle 0x%p (wanted 0x%08X, have 0x%08X)\n", mh, flags, mh->mh_Attributes));

                mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
                continue;
            }
        }
        /* Try to get the memory */
        ret = stdAlloc(mh, mhac_PoolMemHeaderGetCtx(mh), memSize, flags, loc, SysBase);
        D(bug("[InternalAllocPooled] Allocated memory at 0x%p from puddle 0x%p\n", ret, mh));

        /* Got it? */
        if (ret != NULL)
        {
            /*
             * If this is not the first MemHeader and it has some free space,
             * move it forward (so that the next allocation will attempt to use it first).
             * IMPORTANT: We use a modification of Enqueue() because we still sort MemHeaders
             * according to their priority (which they inherit from system MemHeaders).
             * This allows us to have mixed pools (e.g. with both CHIP and FAST regions). This
             * will be needed in future for memory protection.
             */
            if (mh->mh_Node.ln_Pred != NULL && mh->mh_Free > 32)
            {
                D(bug("[InternalAllocPooled] Re-sorting puddle list\n"));
                Remove(&mh->mh_Node);
                EnqueueMemHeader(&pool->pool.PuddleList, mh);
            }

            break;
        }
1048 mh
= (struct MemHeader
*)mh
->mh_Node
.ln_Succ
;
1051 if (pool
->pool
.Requirements
& MEMF_SEM_PROTECTED
)
1053 ReleaseSemaphore(&pool
->sem
);
1058 /* Build munge walls if requested */
1059 ret
= MungWall_Build(ret
, pool
, origSize
, flags
, loc
, SysBase
);
1061 /* Remember where we were allocated from */
1062 *((struct MemHeader
**)ret
) = mh
;
1063 ret
+= sizeof(struct MemHeader
*);
1066 /* Everything fine */
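/*
 * Usage sketch (added for illustration; 'mem' is a hypothetical name):
 * blocks from InternalAllocPooled() must go back through
 * InternalFreePooled(), which recovers the puddle from the MemHeader
 * pointer stashed in front of the block:
 *
 *     APTR mem = InternalAllocPooled(poolHeader, 64, MEMF_CLEAR, loc, SysBase);
 *     if (mem)
 *         InternalFreePooled(poolHeader, mem, 64, loc, SysBase);
 */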
/*
 * This is a pair to InternalAllocPooled().
 * This code is separated from FreePooled() in order to provide compatibility with various
 * memory tracking patches. If some exec code calls InternalAllocPooled() directly
 * (AllocMem() will do it), it has to call InternalFreePooled() directly as well.
 * Our chunks remember from which pool they came, so we don't need a pointer to the pool
 * header here. This will save us from headaches in future FreeMem() implementations.
 */
void InternalFreePooled(APTR poolHeader, APTR memory, IPTR memSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeader *mh;
    APTR freeStart;
    IPTR freeSize;

    D(bug("[exec] InternalFreePooled(0x%p, %u)\n", memory, memSize));

    if (!memory || !memSize) return;

    /* Get the MemHeader pointer. It is stored right before our block. */
    freeStart = memory - sizeof(struct MemHeader *);
    freeSize = memSize + sizeof(struct MemHeader *);
    mh = *((struct MemHeader **)freeStart);

    /* Check walls first */
    freeStart = MungWall_Check(freeStart, freeSize, loc, SysBase);
    if (PrivExecBase(SysBase)->IntFlags & EXECF_MungWall)
        freeSize += MUNGWALL_TOTAL_SIZE;
    /* Verify that the MemHeader pointer is correct */
    if ((mh->mh_Node.ln_Type != NT_MEMORY) ||
        (freeStart < mh->mh_Lower) || (freeStart + freeSize > mh->mh_Upper))
    {
        /* Something is wrong. */
        PoolManagerAlert(PME_FREE_NO_CHUNK, 0, memSize, memory, NULL, NULL);
    }
    else
    {
        struct ProtectedPool *pool = (struct ProtectedPool *)mhac_PoolMemHeaderGetPool(mh);
        IPTR size;
        APTR poolHeaderMH = (APTR)((IPTR)pool - MEMHEADER_TOTAL);

        if (pool->pool.PoolMagic != POOL_MAGIC)
        {
            PoolManagerAlert(PME_FREE_INV_POOL, AT_DeadEnd, memSize, memory, poolHeaderMH, NULL);
        }

        if (poolHeaderMH != poolHeader)
        {
            PoolManagerAlert(PME_FREE_MXD_POOL, 0, memSize, memory, poolHeaderMH, poolHeader);
        }

        if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
        {
            ObtainSemaphore(&pool->sem);
        }
        size = mh->mh_Upper - mh->mh_Lower;
        D(bug("[FreePooled] Allocated from puddle 0x%p, size %u\n", mh, size));

        /* Free the memory. */
        stdDealloc(mh, mhac_PoolMemHeaderGetCtx(mh), freeStart, freeSize, loc, SysBase);
        D(bug("[FreePooled] Deallocated chunk, %u free bytes in the puddle\n", mh->mh_Free));

        /* Is this MemHeader completely free now? */
        if ((mh->mh_Free + mhac_GetCtxSize()) == size)
        {
            D(bug("[FreePooled] Puddle is empty, giving back to the system\n"));

            /* Yes. Remove it from the list. */
            Remove(&mh->mh_Node);

            /* And free it. */
            FreeMemHeader(mh, loc, SysBase);
        }

        if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
        {
            ReleaseSemaphore(&pool->sem);
        }
    }
}
ULONG checkMemHandlers(struct checkMemHandlersState *cmhs, struct ExecBase *SysBase)
{
    struct Node *tmp;
    struct Interrupt *lmh;

    if (cmhs->cmhs_Data.memh_RequestFlags & MEMF_NO_EXPUNGE)
        return MEM_DID_NOTHING;
    /* In order to keep things clean, we must run in a single thread */
    ObtainSemaphore(&PrivExecBase(SysBase)->LowMemSem);

    /*
     * Loop over low memory handlers. Handlers can remove
     * themselves from the list while being invoked, thus
     * we need to be careful!
     */
    for (lmh = (struct Interrupt *)cmhs->cmhs_CurNode;
         (tmp = lmh->is_Node.ln_Succ);
         lmh = (struct Interrupt *)(cmhs->cmhs_CurNode = tmp))
    {
        LONG ret;

        ret = AROS_UFC3 (LONG, lmh->is_Code,
                  AROS_UFCA(struct MemHandlerData *, &cmhs->cmhs_Data, A0),
                  AROS_UFCA(APTR, lmh->is_Data, A1),
                  AROS_UFCA(struct ExecBase *, SysBase, A6)
              );

        if (ret == MEM_TRY_AGAIN)
        {
            /* The MemHandler said it did something. Try again. */
            /* Is there any program that depends on this flag??? */
            cmhs->cmhs_Data.memh_Flags |= MEMHF_RECYCLE;

            ReleaseSemaphore(&PrivExecBase(SysBase)->LowMemSem);
            return MEM_TRY_AGAIN;
        }
        else
        {
            /* Nothing more to expect from this handler. */
            cmhs->cmhs_Data.memh_Flags &= ~MEMHF_RECYCLE;
        }
    }

    ReleaseSemaphore(&PrivExecBase(SysBase)->LowMemSem);
    return MEM_DID_NOTHING;
}
void PoolManagerAlert(ULONG code, ULONG flags, IPTR memSize, APTR memory, APTR poolHeaderMH, APTR poolHeader)
{
    /*
     * TODO: the following should actually be printed as part of the alert.
     * In the future there should be some kind of "alert context". CPU alerts
     * (like illegal access) should remember the CPU context there. Memory manager
     * alerts (like this one) should remember some information of their own.
     */

    bug("[MM] Pool manager error\n");

    switch (code)
    {
    case PME_FREE_NO_CHUNK:
    case PME_FREE_INV_POOL:
    case PME_FREE_MXD_POOL:
        bug("[MM] Attempt to free %u bytes at 0x%p\n", memSize, memory);
        break;

    case PME_ALLOC_INV_POOL:
        bug("[MM] Attempt to allocate %u bytes\n", memSize);
        break;

    case PME_DEL_POOL_INV_POOL:
        bug("[MM] Attempt to free pool 0x%p which is not marked as valid\n", poolHeader);
        break;
    }

    switch (code)
    {
    case PME_FREE_NO_CHUNK:
        bug("[MM] The chunk does not belong to a pool\n");
        break;

    case PME_FREE_INV_POOL:
        bug("[MM] The chunk belongs to pool 0x%p which is not marked as valid\n", poolHeaderMH);
        break;

    case PME_FREE_MXD_POOL:
        bug("[MM] The chunk belongs to pool 0x%p, but the call indicated pool 0x%p\n", poolHeaderMH, poolHeader);
        break;

    case PME_ALLOC_INV_POOL:
        bug("[MM] Requested to allocate from pool 0x%p which is not marked as valid\n", poolHeader);
        break;
    }

    Alert(AN_BadFreeAddr | flags);
}