/*
    Copyright © 1995-2012, The AROS Development Team. All rights reserved.
*/
#include <aros/debug.h>
#include <exec/rawfmt.h>
#include <proto/kernel.h>

#include "exec_intern.h"
#include "exec_util.h"
19 * Find MemHeader to which address belongs.
20 * This function is legal to be called in supervisor mode (we use TypeOfMem()
21 * in order to validate addresses in tons of places). So, here are checks.
23 struct MemHeader
*FindMem(APTR address
, struct ExecBase
*SysBase
)
25 int usermode
= (KernelBase
!= NULL
) && (KrnIsSuper() == 0);
28 /* Nobody should change the memory list now. */
29 if (usermode
) MEM_LOCK_SHARED
;
31 /* Follow the list of MemHeaders */
32 mh
= (struct MemHeader
*)SysBase
->MemList
.lh_Head
;
34 while (mh
->mh_Node
.ln_Succ
!= NULL
)
36 /* Check if this MemHeader fits */
37 if (address
>= mh
->mh_Lower
&& address
< mh
->mh_Upper
)
40 if (usermode
) MEM_UNLOCK
;
44 /* Go to next MemHeader */
45 mh
= (struct MemHeader
*)mh
->mh_Node
.ln_Succ
;
48 if (usermode
) MEM_UNLOCK
;
52 char *FormatMMContext(char *buffer
, struct MMContext
*ctx
, struct ExecBase
*SysBase
)
55 buffer
= NewRawDoFmt("In %s, block at 0x%p, size %lu", (VOID_FUNC
)RAWFMTFUNC_STRING
, buffer
, ctx
->func
, ctx
->addr
, ctx
->size
) - 1;
57 buffer
= NewRawDoFmt("In %s, size %lu", (VOID_FUNC
)RAWFMTFUNC_STRING
, buffer
, ctx
->func
, ctx
->size
) - 1;
61 buffer
= NewRawDoFmt("\nCorrupted MemChunk 0x%p (next 0x%p, size %lu)", (VOID_FUNC
)RAWFMTFUNC_STRING
, buffer
, ctx
->mc
, ctx
->mc
->mc_Next
, ctx
->mc
->mc_Bytes
) - 1;
64 buffer
= NewRawDoFmt("\nPrevious MemChunk 0x%p (next 0x%p, size %lu)", (VOID_FUNC
)RAWFMTFUNC_STRING
, buffer
, ctx
->mcPrev
, ctx
->mcPrev
->mc_Next
, ctx
->mcPrev
->mc_Bytes
) - 1;
67 /* Print MemHeader details */
68 buffer
= NewRawDoFmt("\nMemHeader 0x%p (0x%p - 0x%p)", (VOID_FUNC
)RAWFMTFUNC_STRING
, buffer
, ctx
->mh
, ctx
->mh
->mh_Lower
, ctx
->mh
->mh_Upper
) - 1;
69 if ((IPTR
)ctx
->mh
->mh_First
& (MEMCHUNK_TOTAL
- 1))
70 buffer
= NewRawDoFmt("\n- Unaligned first chunk address (0x%p)", (VOID_FUNC
)RAWFMTFUNC_STRING
, buffer
, ctx
->mh
->mh_First
) - 1;
72 if (ctx
->mh
->mh_Free
& (MEMCHUNK_TOTAL
- 1))
73 buffer
= NewRawDoFmt("\n- Unaligned free space count (0x%p)", (VOID_FUNC
)RAWFMTFUNC_STRING
, buffer
, ctx
->mh
->mh_Free
) - 1;
75 if (ctx
->mh
->mh_First
)
77 if ((APTR
)ctx
->mh
->mh_First
< ctx
->mh
->mh_Lower
)
78 buffer
= NewRawDoFmt("\n- First chunk (0x%p) below lower address", (VOID_FUNC
)RAWFMTFUNC_STRING
, buffer
, ctx
->mh
->mh_First
) - 1;
80 if (((APTR
)ctx
->mh
->mh_First
+ ctx
->mh
->mh_Free
> ctx
->mh
->mh_Upper
))
81 buffer
= NewRawDoFmt("\n- Free space count too large (%lu, first chunk 0x%xp)", (VOID_FUNC
)RAWFMTFUNC_STRING
, buffer
, ctx
->mh
->mh_Free
, ctx
->mh
->mh_First
) - 1;
87 #ifdef NO_CONSISTENCY_CHECKS
89 #define validateHeader(mh, op, addr, size, SysBase) TRUE
90 #define validateChunk(mc, prev, mh, op, addr, size, SysBase) TRUE
94 static ULONG memAlerts
[] =
96 AT_DeadEnd
|AN_MemoryInsane
, /* MM_ALLOC */
97 AT_DeadEnd
|AN_MemCorrupt
, /* MM_FREE */
98 AN_FreeTwice
/* MM_OVERLAP */
102 * MemHeader validation routine. Rules are:
104 * 1. Both mh_First and mh_Free must be MEMCHUNK_TOTAL-aligned.
105 * 2. Free space (if present) must completely fit in between mh_Lower and mh_Upper.
106 * We intentionally don't check header's own location. We assume that in future we'll
107 * be able to put MEMF_CHIP headers inside MEMF_FAST memory, for speed up.
109 static BOOL
validateHeader(struct MemHeader
*mh
, UBYTE op
, APTR addr
, IPTR size
, struct TraceLocation
*tp
, struct ExecBase
*SysBase
)
111 if (((IPTR
)mh
->mh_First
& (MEMCHUNK_TOTAL
- 1)) || (mh
->mh_Free
& (MEMCHUNK_TOTAL
- 1)) || /* 1 */
113 (((APTR
)mh
->mh_First
< mh
->mh_Lower
) || ((APTR
)mh
->mh_First
+ mh
->mh_Free
> mh
->mh_Upper
)))) /* 2 */
117 /* TraceLocation is not supplied by PrepareExecBase(). Fail silently. */
118 struct MMContext alertData
;
122 alertData
.mcPrev
= NULL
;
123 alertData
.func
= tp
->function
;
124 alertData
.addr
= addr
;
125 alertData
.size
= size
;
128 Exec_ExtAlert(memAlerts
[op
], tp
->caller
, tp
->stack
, AT_MEMORY
, &alertData
, SysBase
);
132 * Theoretically during very early boot we can fail to post an alert (no KernelBase yet).
133 * In this case we return with fault indication.
141 * MemChunk consistency check. Rules are:
143 * 1. Both mc_Next and mc_Bytes must me MEMCHUNK_TOTAL-aligned, and mc_Bytes can not be zero.
144 * 2. End of this chunk must not be greater than mh->mh_Upper
145 * 3. mc_Next (if present) must point in between end of this chunk and mh->mh_Upper - MEMCHUNK_TOTAL.
146 * There must be at least MEMHCUNK_TOTAL allocated bytes between free chunks.
148 * This function is inlined for speed improvements.
150 static inline BOOL
validateChunk(struct MemChunk
*p2
, struct MemChunk
*p1
, struct MemHeader
*mh
,
151 UBYTE op
, APTR addr
, IPTR size
,
152 struct TraceLocation
*tp
, struct ExecBase
*SysBase
)
154 if (((IPTR
)p2
->mc_Next
& (MEMCHUNK_TOTAL
-1)) || (p2
->mc_Bytes
== 0) || (p2
->mc_Bytes
& (MEMCHUNK_TOTAL
-1)) || /* 1 */
155 ((APTR
)p2
+ p2
->mc_Bytes
> mh
->mh_Upper
) || /* 2 */
156 (p2
->mc_Next
&& (((APTR
)p2
->mc_Next
< (APTR
)p2
+ p2
->mc_Bytes
+ MEMCHUNK_TOTAL
) || /* 3 */
157 ((APTR
)p2
->mc_Next
> mh
->mh_Upper
- MEMCHUNK_TOTAL
))))
161 struct MMContext alertData
;
165 alertData
.mcPrev
= (p1
== (struct MemChunk
*)&mh
->mh_First
) ? NULL
: p1
;
166 alertData
.func
= tp
->function
;
167 alertData
.addr
= addr
;
168 alertData
.size
= size
;
171 Exec_ExtAlert(memAlerts
[op
], tp
->caller
, tp
->stack
, AT_MEMORY
, &alertData
, SysBase
);
182 * Allocate block from the given MemHeader in a specific way.
183 * This routine can be called with SysBase = NULL.
185 APTR
stdAlloc(struct MemHeader
*mh
, IPTR size
, ULONG requirements
, struct TraceLocation
*tp
, struct ExecBase
*SysBase
)
187 /* First round byteSize up to a multiple of MEMCHUNK_TOTAL */
188 IPTR byteSize
= AROS_ROUNDUP2(size
, MEMCHUNK_TOTAL
);
189 struct MemChunk
*mc
=NULL
, *p1
, *p2
;
191 /* Validate MemHeader before doing anything. */
192 if (!validateHeader(mh
, MM_ALLOC
, NULL
, size
, tp
, SysBase
))
196 * The free memory list is only single linked, i.e. to remove
197 * elements from the list I need the node's predecessor. For the
198 * first element I can use mh->mh_First instead of a real predecessor.
200 p1
= (struct MemChunk
*)&mh
->mh_First
;
204 * Follow the memory list. p1 is the previous MemChunk, p2 is the current one.
205 * On 1st pass p1 points to mh->mh_First, so that changing p1->mc_Next actually
206 * changes mh->mh_First.
210 /* Validate the current chunk */
211 if (!validateChunk(p2
, p1
, mh
, MM_ALLOC
, NULL
, size
, tp
, SysBase
))
214 /* Check if the current block is large enough */
215 if (p2
->mc_Bytes
>=byteSize
)
220 /* Use this one if MEMF_REVERSE is not set.*/
221 if (!(requirements
& MEMF_REVERSE
))
223 /* Else continue - there may be more to come. */
226 /* Go to next block */
231 /* Something found? */
234 /* Remember: if MEMF_REVERSE is set p1 and p2 are now invalid. */
238 /* Remove the block from the list and return it. */
239 if (p2
->mc_Bytes
== byteSize
)
241 /* Fits exactly. Just relink the list. */
242 p1
->mc_Next
= p2
->mc_Next
;
247 if (requirements
& MEMF_REVERSE
)
249 /* Return the last bytes. */
251 mc
= (struct MemChunk
*)((UBYTE
*)p2
+p2
->mc_Bytes
-byteSize
);
255 /* Return the first bytes. */
256 p1
->mc_Next
=(struct MemChunk
*)((UBYTE
*)p2
+byteSize
);
261 p1
->mc_Next
= p2
->mc_Next
;
262 p1
->mc_Bytes
= p2
->mc_Bytes
-byteSize
;
265 mh
->mh_Free
-= byteSize
;
267 /* Clear the block if requested */
268 if (requirements
& MEMF_CLEAR
)
269 memset(mc
, 0, byteSize
);
275 /* Free 'size' bytes starting at 'addr' belonging to MemHeader 'freeList' */
276 void stdDealloc(struct MemHeader
*freeList
, APTR addr
, IPTR size
, struct TraceLocation
*tp
, struct ExecBase
*SysBase
)
280 struct MemChunk
*p1
, *p2
, *p3
;
283 /* Make sure the MemHeader is OK */
284 if (!validateHeader(freeList
, MM_FREE
, addr
, size
, tp
, SysBase
))
287 /* Align size to the requirements */
288 byteSize
= size
+ ((IPTR
)addr
& (MEMCHUNK_TOTAL
- 1));
289 byteSize
= (byteSize
+ MEMCHUNK_TOTAL
-1) & ~(MEMCHUNK_TOTAL
- 1);
291 /* Align the block as well */
292 memoryBlock
= (APTR
)((IPTR
)addr
& ~(MEMCHUNK_TOTAL
-1));
295 The free memory list is only single linked, i.e. to insert
296 elements into the list I need the node as well as its
297 predecessor. For the first element I can use freeList->mh_First
298 instead of a real predecessor.
300 p1
= (struct MemChunk
*)&freeList
->mh_First
;
301 p2
= freeList
->mh_First
;
303 /* Start and end(+1) of the block */
304 p3
= (struct MemChunk
*)memoryBlock
;
305 p4
= (UBYTE
*)p3
+ byteSize
;
307 /* No chunk in list? Just insert the current one and return. */
310 p3
->mc_Bytes
= byteSize
;
313 freeList
->mh_Free
+= byteSize
;
317 /* Follow the list to find a place where to insert our memory. */
320 if (!validateChunk(p2
, p1
, freeList
, MM_FREE
, addr
, size
, tp
, SysBase
))
323 /* Found a block with a higher address? */
326 #if !defined(NO_CONSISTENCY_CHECKS)
328 If the memory to be freed overlaps with the current
329 block something must be wrong.
333 bug("[MM] Chunk allocator error\n");
334 bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize
, memoryBlock
, freeList
);
335 bug("[MM] Block overlaps (1) with chunk 0x%p (%u bytes)\n", p2
, p2
->mc_Bytes
);
341 /* End the loop with p2 non-zero */
344 /* goto next block */
348 /* If the loop ends with p2 zero add it at the end. */
349 } while (p2
!= NULL
);
351 /* If there was a previous block merge with it. */
352 if (p1
!= (struct MemChunk
*)&freeList
->mh_First
)
354 #if !defined(NO_CONSISTENCY_CHECKS)
355 /* Check if they overlap. */
356 if ((UBYTE
*)p1
+ p1
->mc_Bytes
> (UBYTE
*)p3
)
358 bug("[MM] Chunk allocator error\n");
359 bug("[MM] Attempt to free %u bytes at 0x%p from MemHeader 0x%p\n", byteSize
, memoryBlock
, freeList
);
360 bug("[MM] Block overlaps (2) with chunk 0x%p (%u bytes)\n", p1
, p1
->mc_Bytes
);
366 /* Merge if possible */
367 if ((UBYTE
*)p1
+ p1
->mc_Bytes
== (UBYTE
*)p3
)
370 /* Not possible to merge */
374 There was no previous block. Just insert the memory at
375 the start of the list.
379 /* Try to merge with next block (if there is one ;-) ). */
380 if (p4
== (UBYTE
*)p2
&& p2
!= NULL
)
383 Overlap checking already done. Doing it here after
384 the list potentially changed would be a bad idea.
389 /* relink the list and return. */
391 p3
->mc_Bytes
= p4
- (UBYTE
*)p3
;
392 freeList
->mh_Free
+= byteSize
;
397 * During transition period four routines below use nommu allocator.
398 * When transition is complete they should use them only if MMU
399 * is inactive. Otherwise they should use KrnAllocPages()/KrnFreePages().
402 /* Non-mungwalled AllocAbs(). Does not destroy sideways regions. */
403 APTR
InternalAllocAbs(APTR location
, IPTR byteSize
, struct ExecBase
*SysBase
)
405 return nommu_AllocAbs(location
, byteSize
, SysBase
);
409 * Use this if you want to free region allocated by InternalAllocAbs().
410 * Otherwise you hit mungwall problem (FreeMem() expects header).
412 void InternalFreeMem(APTR location
, IPTR byteSize
, struct TraceLocation
*loc
, struct ExecBase
*SysBase
)
414 nommu_FreeMem(location
, byteSize
, loc
, SysBase
);
418 * Allocate a region managed by own header. Usable size is reduced by size
421 APTR
AllocMemHeader(IPTR size
, ULONG flags
, struct TraceLocation
*loc
, struct ExecBase
*SysBase
)
423 struct MemHeader
*mh
;
425 mh
= nommu_AllocMem(size
, flags
, loc
, SysBase
);
426 DMH(bug("[AllocMemHeader] Allocated %u bytes at 0x%p\n", size
, mh
));
430 struct MemHeader
*orig
= FindMem(mh
, SysBase
);
432 size
-= MEMHEADER_TOTAL
;
435 * Initialize new MemHeader.
436 * Inherit attributes from system MemHeader from which
437 * our chunk was allocated.
439 mh
->mh_Node
.ln_Type
= NT_MEMORY
;
440 mh
->mh_Node
.ln_Pri
= orig
->mh_Node
.ln_Pri
;
441 mh
->mh_Attributes
= orig
->mh_Attributes
;
442 mh
->mh_Lower
= (APTR
)mh
+ MEMHEADER_TOTAL
;
443 mh
->mh_Upper
= mh
->mh_Lower
+ size
;
444 mh
->mh_First
= mh
->mh_Lower
;
447 /* Create the first (and the only) MemChunk */
448 mh
->mh_First
->mc_Next
= NULL
;
449 mh
->mh_First
->mc_Bytes
= size
;
454 /* Free a region allocated by AllocMemHeader() */
455 void FreeMemHeader(APTR addr
, struct TraceLocation
*loc
, struct ExecBase
*SysBase
)
457 ULONG size
= ((struct MemHeader
*)addr
)->mh_Upper
- addr
;
459 DMH(bug("[FreeMemHeader] Freeing %u bytes at 0x%p\n", size
, addr
));
460 nommu_FreeMem(addr
, size
, loc
, SysBase
);
464 * This is our own Enqueue() version. Currently the only differece is that
465 * we insert our node before the first node with LOWER OR EQUAL priority,
466 * so that for nodes with equal priority it will be LIFO, not FIFO queue.
467 * This speeds up the allocator.
468 * TODO: implement secondary sorting by mh_Free. This will allow to
469 * implement best-match algorithm (so that puddles with smaller free space
470 * will be picked up first). This way the smallest allocations will reuse
471 * smallest chunks instead of fragmenting large ones.
473 static void EnqueueMemHeader(struct MinList
*list
, struct MemHeader
*mh
)
475 struct MemHeader
*next
;
477 /* Look through the list */
478 ForeachNode (list
, next
)
481 Look for the first MemHeader with a lower or equal pri as the node
482 we have to insert into the list.
484 if (mh
->mh_Node
.ln_Pri
>= next
->mh_Node
.ln_Pri
)
488 /* Insert the node before next */
489 mh
->mh_Node
.ln_Pred
= next
->mh_Node
.ln_Pred
;
490 mh
->mh_Node
.ln_Succ
= &next
->mh_Node
;
491 next
->mh_Node
.ln_Pred
->ln_Succ
= &mh
->mh_Node
;
492 next
->mh_Node
.ln_Pred
= &mh
->mh_Node
;
496 * Allocate memory with given physical properties from the given pool.
497 * Our pools can be mixed. This means that different puddles from the
498 * pool can have different physical flags. For example the same pool
499 * can contain puddles from both CHIP and FAST memory. This is done in
500 * order to provide a single system default pool for all types of memory.
502 APTR
InternalAllocPooled(APTR poolHeader
, IPTR memSize
, ULONG flags
, struct TraceLocation
*loc
, struct ExecBase
*SysBase
)
504 struct ProtectedPool
*pool
= poolHeader
+ MEMHEADER_TOTAL
;
507 struct MemHeader
*mh
;
509 D(bug("[exec] InternalAllocPooled(0x%p, %u, 0x%08X), header 0x%p\n", poolHeader
, memSize
, flags
, pool
));
512 * Memory blocks allocated from the pool store pointers to the MemHeader they were
513 * allocated from. This is done in order to avoid slow lookups in InternalFreePooled().
514 * This is done in AllocVec()-alike manner; the pointer is placed right before the block.
516 memSize
+= sizeof(struct MemHeader
*);
519 /* If mungwall is enabled, count also size of walls */
520 if (PrivExecBase(SysBase
)->IntFlags
& EXECF_MungWall
)
521 memSize
+= MUNGWALL_TOTAL_SIZE
;
523 if (pool
->pool
.Requirements
& MEMF_SEM_PROTECTED
)
525 ObtainSemaphore(&pool
->sem
);
528 /* Follow the list of MemHeaders */
529 mh
= (struct MemHeader
*)pool
->pool
.PuddleList
.mlh_Head
;
532 ULONG physFlags
= flags
& MEMF_PHYSICAL_MASK
;
534 /* Are there no more MemHeaders? */
535 if (mh
->mh_Node
.ln_Succ
== NULL
)
539 * Usually we allocate puddles of default size, specified during
540 * pool creation. However we can be asked to allocate block whose
541 * size will be larger than default puddle size.
542 * Previously this was handled by threshSize parameter. In our new
543 * implementation we just allocate enlarged puddle. This is done
544 * in order not to waste page tails beyond the allocated large block.
545 * These tails will be used for our pool too. Their size is smaller
546 * than page size but they still perfectly fit for small allocations
547 * (the primary use for pools).
548 * Since our large block is also a puddle, it will be reused for our
549 * pool when the block is freed. It can also be reused for another
550 * large allocation, if it fits in.
551 * Our final puddle size still includes MEMHEADER_TOTAL in any case.
553 IPTR puddleSize
= pool
->pool
.PuddleSize
;
555 if (memSize
> puddleSize
- MEMHEADER_TOTAL
)
557 IPTR align
= PrivExecBase(SysBase
)->PageSize
- 1;
559 puddleSize
= memSize
+ MEMHEADER_TOTAL
;
560 /* Align the size up to page boundary */
561 puddleSize
= (puddleSize
+ align
) & ~align
;
564 mh
= AllocMemHeader(puddleSize
, flags
, loc
, SysBase
);
565 D(bug("[InternalAllocPooled] Allocated new puddle 0x%p, size %u\n", mh
, puddleSize
));
567 /* No memory left? */
571 /* Add the new puddle to our pool */
572 mh
->mh_Node
.ln_Name
= (STRPTR
)pool
;
573 Enqueue((struct List
*)&pool
->pool
.PuddleList
, &mh
->mh_Node
);
575 /* Fall through to get the memory */
579 /* Ignore existing MemHeaders with memory type that differ from the requested ones */
580 if (physFlags
& ~mh
->mh_Attributes
)
582 D(bug("[InternalAllocPooled] Wrong flags for puddle 0x%p (wanted 0x%08X, have 0x%08X\n", flags
, mh
->mh_Attributes
));
584 mh
= (struct MemHeader
*)mh
->mh_Node
.ln_Succ
;
589 /* Try to get the memory */
590 ret
= stdAlloc(mh
, memSize
, flags
, loc
, SysBase
);
591 D(bug("[InternalAllocPooled] Allocated memory at 0x%p from puddle 0x%p\n", ret
, mh
));
597 * If this is not the first MemHeader and it has some free space,
598 * move it forward (so that the next allocation will attempt to use it first).
599 * IMPORTANT: We use modification of Enqueue() because we still sort MemHeaders
600 * according to their priority (which they inherit from system MemHeaders).
601 * This allows us to have mixed pools (e.g. with both CHIP and FAST regions). This
602 * will be needed in future for memory protection.
604 if (mh
->mh_Node
.ln_Pred
!= NULL
&& mh
->mh_Free
> 32)
606 D(bug("[InternalAllocPooled] Re-sorting puddle list\n"));
607 Remove(&mh
->mh_Node
);
608 EnqueueMemHeader(&pool
->pool
.PuddleList
, mh
);
614 /* No. Try next MemHeader */
615 mh
= (struct MemHeader
*)mh
->mh_Node
.ln_Succ
;
618 if (pool
->pool
.Requirements
& MEMF_SEM_PROTECTED
)
620 ReleaseSemaphore(&pool
->sem
);
625 /* Build munge walls if requested */
626 ret
= MungWall_Build(ret
, pool
, origSize
, flags
, loc
, SysBase
);
628 /* Remember where we were allocated from */
629 *((struct MemHeader
**)ret
) = mh
;
630 ret
+= sizeof(struct MemHeader
*);
633 /* Everything fine */
638 * This is a pair to InternalAllocPooled()
639 * This code separated from FreePooled() in order to provide compatibility with various
640 * memory tracking patches. If some exec code calls InternalAllocPooled() directly
641 * (AllocMem() will do it), it has to call also InternalFreePooled() directly.
642 * Our chunks remember from which pool they came, so we don't need a pointer to pool
643 * header here. This will save us from headaches in future FreeMem() implementation.
645 void InternalFreePooled(APTR memory
, IPTR memSize
, struct TraceLocation
*loc
, struct ExecBase
*SysBase
)
647 struct MemHeader
*mh
;
651 D(bug("[exec] InternalFreePooled(0x%p, %u)\n", memory
, memSize
));
653 if (!memory
|| !memSize
) return;
655 /* Get MemHeader pointer. It is stored right before our block. */
656 freeStart
= memory
- sizeof(struct MemHeader
*);
657 freeSize
= memSize
+ sizeof(struct MemHeader
*);
658 mh
= *((struct MemHeader
**)freeStart
);
660 /* Check walls first */
661 freeStart
= MungWall_Check(freeStart
, freeSize
, loc
, SysBase
);
662 if (PrivExecBase(SysBase
)->IntFlags
& EXECF_MungWall
)
663 freeSize
+= MUNGWALL_TOTAL_SIZE
;
665 /* Verify that MemHeader pointer is correct */
666 if ((mh
->mh_Node
.ln_Type
!= NT_MEMORY
) ||
667 (freeStart
< mh
->mh_Lower
) || (freeStart
+ freeSize
> mh
->mh_Upper
))
670 * Something is wrong.
671 * TODO: the following should actually be printed as part of the alert.
672 * In future there should be some kind of "alert context". CPU alerts
673 * (like illegal access) should remember CPU context there. Memory manager
674 * alerts (like this one) should remember some own information.
676 bug("[MM] Pool manager error\n");
677 bug("[MM] Attempt to free %u bytes at 0x%p\n", memSize
, memory
);
678 bug("[MM] The chunk does not belong to a pool\n");
680 Alert(AN_BadFreeAddr
);
684 struct ProtectedPool
*pool
= (struct ProtectedPool
*)mh
->mh_Node
.ln_Name
;
687 if (pool
->pool
.Requirements
& MEMF_SEM_PROTECTED
)
689 ObtainSemaphore(&pool
->sem
);
692 size
= mh
->mh_Upper
- mh
->mh_Lower
;
693 D(bug("[FreePooled] Allocated from puddle 0x%p, size %u\n", mh
, size
));
695 /* Free the memory. */
696 stdDealloc(mh
, freeStart
, freeSize
, loc
, SysBase
);
697 D(bug("[FreePooled] Deallocated chunk, %u free bytes in the puddle\n", mh
->mh_Free
));
699 /* Is this MemHeader completely free now? */
700 if (mh
->mh_Free
== size
)
702 D(bug("[FreePooled] Puddle is empty, giving back to the system\n"));
704 /* Yes. Remove it from the list. */
705 Remove(&mh
->mh_Node
);
707 FreeMemHeader(mh
, loc
, SysBase
);
711 if (pool
->pool
.Requirements
& MEMF_SEM_PROTECTED
)
713 ReleaseSemaphore(&pool
->sem
);
718 ULONG
checkMemHandlers(struct checkMemHandlersState
*cmhs
, struct ExecBase
*SysBase
)
721 struct Interrupt
*lmh
;
723 if (cmhs
->cmhs_Data
.memh_RequestFlags
& MEMF_NO_EXPUNGE
)
724 return MEM_DID_NOTHING
;
726 /* In order to keep things clean, we must run in a single thread */
727 ObtainSemaphore(&PrivExecBase(SysBase
)->LowMemSem
);
730 * Loop over low memory handlers. Handlers can remove
731 * themselves from the list while being invoked, thus
732 * we need to be careful!
734 for (lmh
= (struct Interrupt
*)cmhs
->cmhs_CurNode
;
735 (tmp
= lmh
->is_Node
.ln_Succ
);
736 lmh
= (struct Interrupt
*)(cmhs
->cmhs_CurNode
= tmp
))
740 ret
= AROS_UFC3 (LONG
, lmh
->is_Code
,
741 AROS_UFCA(struct MemHandlerData
*, &cmhs
->cmhs_Data
, A0
),
742 AROS_UFCA(APTR
, lmh
->is_Data
, A1
),
743 AROS_UFCA(struct ExecBase
*, SysBase
, A6
)
746 if (ret
== MEM_TRY_AGAIN
)
748 /* MemHandler said he did something. Try again. */
749 /* Is there any program that depends on this flag??? */
750 cmhs
->cmhs_Data
.memh_Flags
|= MEMHF_RECYCLE
;
752 ReleaseSemaphore(&PrivExecBase(SysBase
)->LowMemSem
);
753 return MEM_TRY_AGAIN
;
756 /* Nothing more to expect from this handler. */
757 cmhs
->cmhs_Data
.memh_Flags
&= ~MEMHF_RECYCLE
;
760 ReleaseSemaphore(&PrivExecBase(SysBase
)->LowMemSem
);
761 return MEM_DID_NOTHING
;