/* AROS.git: rom/exec/memory.c */

#include <aros/debug.h>
#include <proto/kernel.h>

#include "exec_intern.h"
#include "memory.h"
#include "mungwall.h"

#define DMH(x)
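/*
 * DMH() wraps the MemHeader-related debug output used by AllocMemHeader()
 * and FreeMemHeader() below; it is defined empty here, so those bug()
 * calls compile out.
 */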
/* Find MemHeader to which address belongs */
struct MemHeader *FindMem(APTR address, struct ExecBase *SysBase)
{
    struct MemHeader *mh;

    /* Nobody should change the memory list now. */
    MEM_LOCK_SHARED;

    /* Follow the list of MemHeaders */
    mh = (struct MemHeader *)SysBase->MemList.lh_Head;

    while (mh->mh_Node.ln_Succ != NULL)
    {
        /* Check if this MemHeader fits */
        if (address >= mh->mh_Lower && address < mh->mh_Upper)
        {
            /* Yes. Return it. */
            MEM_UNLOCK;
            return mh;
        }

        /* Go to next MemHeader */
        mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
    }

    MEM_UNLOCK;
    return NULL;
}
/*
 * Allocate a block from the given MemHeader in a specific way.
 * This routine can be called with SysBase = NULL.
 */
APTR stdAlloc(struct MemHeader *mh, IPTR byteSize, ULONG requirements, struct ExecBase *SysBase)
{
    struct MemChunk *mc = NULL, *p1, *p2;

    /* First round byteSize up to a multiple of MEMCHUNK_TOTAL */
    byteSize = AROS_ROUNDUP2(byteSize, MEMCHUNK_TOTAL);

    /*
     * The free memory list is only singly linked, i.e. to remove
     * elements from the list I need the node's predecessor. For the
     * first element I can use mh->mh_First instead of a real predecessor.
     */
    p1 = (struct MemChunk *)&mh->mh_First;
    p2 = p1->mc_Next;

    /* Follow the memory list */
    while (p2 != NULL)
    {
        /* p1 is the previous MemChunk, p2 is the current one */
#if !defined(NO_CONSISTENCY_CHECKS)
        /*
         * Memory list consistency checks.
         * 1. Check alignment restrictions.
         */
        if (((IPTR)p2 | (IPTR)p2->mc_Bytes) & (MEMCHUNK_TOTAL - 1))
        {
            if (SysBase && SysBase->DebugAROSBase)
            {
                bug("[MM] Chunk allocator error\n");
                bug("[MM] Attempt to allocate %lu bytes from MemHeader 0x%p\n", byteSize, mh);
                bug("[MM] Misaligned chunk at 0x%p (%u bytes)\n", p2, p2->mc_Bytes);

                Alert(AN_MemoryInsane|AT_DeadEnd);
            }
            return NULL;
        }

        /* 2. Check against overlapping blocks. */
        if (p2->mc_Next && ((UBYTE *)p2 + p2->mc_Bytes >= (UBYTE *)p2->mc_Next))
        {
            if (SysBase && SysBase->DebugAROSBase)
            {
                bug("[MM] Chunk allocator error\n");
                bug("[MM] Attempt to allocate %lu bytes from MemHeader 0x%p\n", byteSize, mh);
                bug("[MM] Overlapping chunks 0x%p (%u bytes) and 0x%p (%u bytes)\n", p2, p2->mc_Bytes, p2->mc_Next, p2->mc_Next->mc_Bytes);

                Alert(AN_MemoryInsane|AT_DeadEnd);
            }
            return NULL;
        }
#endif

        /* Check if the current block is large enough */
        if (p2->mc_Bytes >= byteSize)
        {
            /* It is. */
            mc = p1;

            /* Use this one if MEMF_REVERSE is not set. */
            if (!(requirements & MEMF_REVERSE))
                break;
            /* Else continue - there may be more to come. */
        }

        /* Go to next block */
        p1 = p2;
        p2 = p1->mc_Next;
    }

    /* Something found? */
    if (mc != NULL)
    {
        /* Remember: if MEMF_REVERSE is set p1 and p2 are now invalid. */
        p1 = mc;
        p2 = p1->mc_Next;

        /* Remove the block from the list and return it. */
        if (p2->mc_Bytes == byteSize)
        {
            /* Fits exactly. Just relink the list. */
            p1->mc_Next = p2->mc_Next;
            mc = p2;
        }
        else
        {
            if (requirements & MEMF_REVERSE)
            {
                /* Return the last bytes. */
                p1->mc_Next = p2;
                mc = (struct MemChunk *)((UBYTE *)p2 + p2->mc_Bytes - byteSize);
            }
            else
            {
                /* Return the first bytes. */
                p1->mc_Next = (struct MemChunk *)((UBYTE *)p2 + byteSize);
                mc = p2;
            }

            p1 = p1->mc_Next;
            p1->mc_Next  = p2->mc_Next;
            p1->mc_Bytes = p2->mc_Bytes - byteSize;
        }

        mh->mh_Free -= byteSize;

        /* Clear the block if requested */
        if (requirements & MEMF_CLEAR)
            memset(mc, 0, byteSize);
    }

    return mc;
}
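/*
 * Worked example (a sketch; MEMCHUNK_TOTAL is assumed to be 16 here, the
 * real value is port-specific): a request for byteSize = 10 is rounded up
 * to 16. Without MEMF_REVERSE the first free chunk of at least 16 bytes is
 * used and the allocation is carved from its head; with MEMF_REVERSE the
 * last fitting chunk is remembered and the allocation is carved from its
 * tail instead.
 */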
/* Free 'byteSize' bytes starting at 'memoryBlock' belonging to MemHeader 'freeList' */
void stdDealloc(struct MemHeader *freeList, APTR memoryBlock, IPTR byteSize, struct ExecBase *SysBase)
{
    struct MemChunk *p1, *p2, *p3;
    UBYTE *p4;

    /* Align the size to the requirements */
    byteSize += (IPTR)memoryBlock & (MEMCHUNK_TOTAL - 1);
    byteSize  = (byteSize + MEMCHUNK_TOTAL - 1) & ~(MEMCHUNK_TOTAL - 1);

    /* Align the block as well */
    memoryBlock = (APTR)((IPTR)memoryBlock & ~(MEMCHUNK_TOTAL - 1));

    /*
     * The free memory list is only singly linked, i.e. to insert
     * elements into the list I need the node as well as its
     * predecessor. For the first element I can use freeList->mh_First
     * instead of a real predecessor.
     */
    p1 = (struct MemChunk *)&freeList->mh_First;
    p2 = freeList->mh_First;

    /* Start and end (+1) of the block */
    p3 = (struct MemChunk *)memoryBlock;
    p4 = (UBYTE *)p3 + byteSize;

    /* No chunk in the list? Just insert the current one and return. */
    if (p2 == NULL)
    {
        p3->mc_Bytes = byteSize;
        p3->mc_Next  = NULL;
        p1->mc_Next  = p3;
        freeList->mh_Free += byteSize;
        return;
    }

    /* Follow the list to find a place where to insert our memory. */
    do
    {
#if !defined(NO_CONSISTENCY_CHECKS)
        /*
         * Do some consistency checks:
         * 1. All MemChunks must be aligned to MEMCHUNK_TOTAL.
         */
        if (((IPTR)p2 | (IPTR)p2->mc_Bytes) & (MEMCHUNK_TOTAL - 1))
        {
            bug("[MM] Chunk allocator error\n");
            bug("[MM] Attempt to free %lu bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
            bug("[MM] Misaligned chunk at 0x%p (%u bytes)\n", p2, p2->mc_Bytes);

            Alert(AN_MemCorrupt|AT_DeadEnd);
        }

        /*
         * 2. The end (+1) of the current MemChunk
         * must be lower than the start of the next one.
         */
        if (p2->mc_Next && ((UBYTE *)p2 + p2->mc_Bytes >= (UBYTE *)p2->mc_Next))
        {
            bug("[MM] Chunk allocator error\n");
            bug("[MM] Attempt to free %lu bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
            bug("[MM] Overlapping chunks 0x%p (%u bytes) and 0x%p (%u bytes)\n", p2, p2->mc_Bytes, p2->mc_Next, p2->mc_Next->mc_Bytes);

            Alert(AN_MemCorrupt|AT_DeadEnd);
        }
#endif
        /* Found a block with a higher address? */
        if (p2 >= p3)
        {
#if !defined(NO_CONSISTENCY_CHECKS)
            /*
             * If the memory to be freed overlaps with the current
             * block, something must be wrong.
             */
            if (p4 > (UBYTE *)p2)
            {
                bug("[MM] Chunk allocator error\n");
                bug("[MM] Attempt to free %lu bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
                bug("[MM] Block overlaps with chunk 0x%p (%u bytes)\n", p2, p2->mc_Bytes);

                Alert(AN_FreeTwice);
                return;
            }
#endif
            /* End the loop with p2 non-zero */
            break;
        }
        /* Go to next block */
        p1 = p2;
        p2 = p2->mc_Next;

        /* If the loop ends with p2 zero, add it at the end. */
    } while (p2 != NULL);

    /* If there was a previous block, merge with it. */
    if (p1 != (struct MemChunk *)&freeList->mh_First)
    {
#if !defined(NO_CONSISTENCY_CHECKS)
        /* Check if they overlap. */
        if ((UBYTE *)p1 + p1->mc_Bytes > (UBYTE *)p3)
        {
            bug("[MM] Chunk allocator error\n");
            bug("[MM] Attempt to free %lu bytes at 0x%p from MemHeader 0x%p\n", byteSize, memoryBlock, freeList);
            bug("[MM] Block overlaps with chunk 0x%p (%u bytes)\n", p1, p1->mc_Bytes);

            Alert(AN_FreeTwice);
            return;
        }
#endif
        /* Merge if possible */
        if ((UBYTE *)p1 + p1->mc_Bytes == (UBYTE *)p3)
            p3 = p1;
        else
            /* Not possible to merge */
            p1->mc_Next = p3;
    }
    else
        /*
         * There was no previous block. Just insert the memory at
         * the start of the list.
         */
        p1->mc_Next = p3;

    /* Try to merge with the next block (if there is one ;-) ). */
    if (p4 == (UBYTE *)p2 && p2 != NULL)
    {
        /*
         * Overlap checking has already been done. Doing it here, after
         * the list has potentially changed, would be a bad idea.
         */
        p4 += p2->mc_Bytes;
        p2  = p2->mc_Next;
    }

    /* Relink the list and return. */
    p3->mc_Next  = p2;
    p3->mc_Bytes = p4 - (UBYTE *)p3;
    freeList->mh_Free += byteSize;
}
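/*
 * Coalescing sketch: if the freed block is immediately preceded by a free
 * chunk, the two are merged (p3 = p1 above); if its end touches the next
 * free chunk, that one is absorbed as well. Freeing a block sitting between
 * two free neighbours therefore collapses all three into a single MemChunk.
 */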
/*
 * TODO:
 * During the transition period the four routines below use the nommu allocator.
 * When the transition is complete they should use it only if the MMU
 * is inactive. Otherwise they should use KrnAllocPages()/KrnFreePages().
 */

/* Non-mungwalled AllocAbs(). Does not destroy sideways regions. */
APTR InternalAllocAbs(APTR location, IPTR byteSize, struct ExecBase *SysBase)
{
    return nommu_AllocAbs(location, byteSize, SysBase);
}

/*
 * Use this if you want to free a region allocated by InternalAllocAbs().
 * Otherwise you hit the mungwall problem (FreeMem() expects a header).
 */
void InternalFreeMem(APTR location, IPTR byteSize, struct ExecBase *SysBase)
{
    nommu_FreeMem(location, byteSize, SysBase);
}
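/*
 * Usage sketch illustrating the pairing required by the comment above
 * (the address and size are hypothetical example values): a region grabbed
 * with InternalAllocAbs() has no mungwall/AllocMem header, so it must be
 * returned with InternalFreeMem(), not FreeMem().
 *
 *     APTR region = InternalAllocAbs((APTR)0x100000, 4096, SysBase);
 *     if (region)
 *         InternalFreeMem(region, 4096, SysBase);
 */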
/* Allocate a region managed by its own header */
APTR AllocMemHeader(IPTR size, ULONG flags, struct ExecBase *SysBase)
{
    struct MemHeader *mh;

    mh = nommu_AllocMem(size, flags, SysBase);
    DMH(bug("[AllocMemHeader] Allocated %u bytes at 0x%p\n", size, mh));

    if (mh)
    {
        struct MemHeader *orig = FindMem(mh, SysBase);

        size -= MEMHEADER_TOTAL;

        /*
         * Initialize the new MemHeader.
         * Inherit attributes from the system MemHeader from which
         * our chunk was allocated.
         */
        mh->mh_Node.ln_Type = NT_MEMORY;
        mh->mh_Node.ln_Pri  = orig->mh_Node.ln_Pri;
        mh->mh_Attributes   = orig->mh_Attributes;
        mh->mh_Lower        = (APTR)mh + MEMHEADER_TOTAL;
        mh->mh_Upper        = mh->mh_Lower + size;
        mh->mh_First        = mh->mh_Lower;
        mh->mh_Free         = size;

        /* Create the first (and the only) MemChunk */
        mh->mh_First->mc_Next  = NULL;
        mh->mh_First->mc_Bytes = size;
    }
    return mh;
}
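/*
 * Resulting layout of a region returned by AllocMemHeader() (sketch):
 *
 *   mh                          mh_Lower == mh_First                mh_Upper
 *   |<---- MEMHEADER_TOTAL ---->|<----- size (one free MemChunk) ----->|
 */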
/* Free a region allocated by AllocMemHeader() */
void FreeMemHeader(APTR addr, struct ExecBase *SysBase)
{
    ULONG size = ((struct MemHeader *)addr)->mh_Upper - addr;

    DMH(bug("[FreeMemHeader] Freeing %u bytes at 0x%p\n", size, addr));
    nommu_FreeMem(addr, size, SysBase);
}
/*
 * This is our own version of Enqueue(). Currently the only difference is that
 * we insert our node before the first node with LOWER OR EQUAL priority,
 * so that for nodes with equal priority the queue is LIFO, not FIFO.
 * This speeds up the allocator.
 * TODO: implement secondary sorting by mh_Free. This will allow us to
 * implement a best-match algorithm (so that puddles with smaller free space
 * will be picked up first). This way the smallest allocations will reuse the
 * smallest chunks instead of fragmenting large ones.
 */
static void EnqueueMemHeader(struct MinList *list, struct MemHeader *mh)
{
    struct MemHeader *next;

    /* Look through the list */
    ForeachNode (list, next)
    {
        /*
         * Look for the first MemHeader with a priority lower than
         * or equal to that of the node we have to insert into the list.
         */
        if (mh->mh_Node.ln_Pri >= next->mh_Node.ln_Pri)
            break;
    }

    /* Insert the node before next */
    mh->mh_Node.ln_Pred = next->mh_Node.ln_Pred;
    mh->mh_Node.ln_Succ = &next->mh_Node;
    next->mh_Node.ln_Pred->ln_Succ = &mh->mh_Node;
    next->mh_Node.ln_Pred = &mh->mh_Node;
}
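/*
 * Example: inserting a pri-0 MemHeader into a list that already holds two
 * pri-0 MemHeaders places the new one in front of both (LIFO), whereas
 * exec's Enqueue() would append it after them (FIFO).
 */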
/*
 * Allocate memory with the given physical properties from the given pool.
 * Our pools can be mixed. This means that different puddles from the
 * pool can have different physical flags. For example the same pool
 * can contain puddles from both CHIP and FAST memory. This is done in
 * order to provide a single system default pool for all types of memory.
 */
APTR InternalAllocPooled(APTR poolHeader, IPTR memSize, ULONG flags, const char *function, APTR caller, struct ExecBase *SysBase)
{
    struct ProtectedPool *pool = poolHeader + MEMHEADER_TOTAL;
    APTR ret = NULL;
    IPTR origSize;
    struct MemHeader *mh;

    D(bug("[exec] InternalAllocPooled(0x%p, %u, 0x%08X), header 0x%p\n", poolHeader, memSize, flags, pool));

    /*
     * Memory blocks allocated from the pool store pointers to the MemHeader they were
     * allocated from. This is done in order to avoid slow lookups in InternalFreePooled().
     * This is done in an AllocVec()-like manner: the pointer is placed right before the block.
     */
    memSize += sizeof(struct MemHeader *);
    origSize = memSize;

    /* If mungwall is enabled, also count the size of the walls */
    if (PrivExecBase(SysBase)->IntFlags & EXECF_MungWall)
        memSize += MUNGWALL_TOTAL_SIZE;

    if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
    {
        ObtainSemaphore(&pool->sem);
    }

    /* Follow the list of MemHeaders */
    mh = (struct MemHeader *)pool->pool.PuddleList.mlh_Head;
    for (;;)
    {
        ULONG physFlags = flags & MEMF_PHYSICAL_MASK;

        /* Are there no more MemHeaders? */
        if (mh->mh_Node.ln_Succ == NULL)
        {
            /*
             * Get a new one.
             * Usually we allocate puddles of the default size, specified during
             * pool creation. However, we can be asked to allocate a block whose
             * size is larger than the default puddle size.
             * Previously this was handled by the threshSize parameter. In our new
             * implementation we just allocate an enlarged puddle. This is done
             * in order not to waste page tails beyond the allocated large block.
             * These tails will be used for our pool too. Their size is smaller
             * than the page size but they are still a perfect fit for small allocations
             * (the primary use for pools).
             * Since our large block is also a puddle, it will be reused for our
             * pool when the block is freed. It can also be reused for another
             * large allocation, if it fits in.
             * Our final puddle size still includes MEMHEADER_TOTAL in any case.
             */
            IPTR puddleSize = pool->pool.PuddleSize;

            if (memSize > puddleSize - MEMHEADER_TOTAL)
            {
                IPTR align = PrivExecBase(SysBase)->PageSize - 1;

                puddleSize = memSize + MEMHEADER_TOTAL;
                /* Align the size up to the page boundary */
                puddleSize = (puddleSize + align) & ~align;
            }

            mh = AllocMemHeader(puddleSize, flags, SysBase);
            D(bug("[InternalAllocPooled] Allocated new puddle 0x%p, size %u\n", mh, puddleSize));

            /* No memory left? */
            if (mh == NULL)
                break;

            /* Add the new puddle to our pool */
            mh->mh_Node.ln_Name = (STRPTR)pool;
            Enqueue((struct List *)&pool->pool.PuddleList, &mh->mh_Node);

            /* Fall through to get the memory */
        }
        else
        {
            /* Ignore existing MemHeaders with a memory type that differs from the requested one */
            if (physFlags & ~mh->mh_Attributes)
            {
                D(bug("[InternalAllocPooled] Wrong flags for puddle 0x%p (wanted 0x%08X, have 0x%08X)\n", mh, flags, mh->mh_Attributes));

                mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
                continue;
            }
        }

        /* Try to get the memory */
        ret = stdAlloc(mh, memSize, flags, SysBase);
        D(bug("[InternalAllocPooled] Allocated memory at 0x%p from puddle 0x%p\n", ret, mh));

        /* Got it? */
        if (ret != NULL)
        {
            /*
             * If this is not the first MemHeader and it has some free space,
             * move it forward (so that the next allocation will attempt to use it first).
             * IMPORTANT: We use a modification of Enqueue() because we still sort MemHeaders
             * according to their priority (which they inherit from system MemHeaders).
             * This allows us to have mixed pools (e.g. with both CHIP and FAST regions). This
             * will be needed in the future for memory protection.
             */
            if (mh->mh_Node.ln_Pred != NULL && mh->mh_Free > 32)
            {
                D(bug("[InternalAllocPooled] Re-sorting puddle list\n"));
                Remove(&mh->mh_Node);
                EnqueueMemHeader(&pool->pool.PuddleList, mh);
            }

            break;
        }

        /* No. Try the next MemHeader */
        mh = (struct MemHeader *)mh->mh_Node.ln_Succ;
    }

    if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
    {
        ReleaseSemaphore(&pool->sem);
    }

    if (ret)
    {
        /* Build munge walls if requested */
        ret = MungWall_Build(ret, pool, origSize, flags, function, caller, SysBase);

        /* Remember where we were allocated from */
        *((struct MemHeader **)ret) = mh;
        ret += sizeof(struct MemHeader *);
    }

    /* Everything fine */
    return ret;
}
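/*
 * Layout of a pooled block as handed to the caller (sketch; the mungwall
 * parts exist only when EXECF_MungWall is set, and their exact shape is
 * defined by MungWall_Build()):
 *
 *   [ mungwall ][ struct MemHeader * ][ user data ... ][ mungwall ]
 *                                     ^-- address returned by InternalAllocPooled()
 *
 * InternalFreePooled() steps back over the stored pointer to locate the
 * owning puddle without walking the puddle list.
 */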
/*
 * This is a pair to InternalAllocPooled().
 * This code is separated from FreePooled() in order to provide compatibility with various
 * memory tracking patches. If some exec code calls InternalAllocPooled() directly
 * (AllocMem() will do that), it also has to call InternalFreePooled() directly.
 * Our chunks remember which pool they came from, so we don't need a pointer to the pool
 * header here. This will save us from headaches in the future FreeMem() implementation.
 */
void InternalFreePooled(APTR memory, IPTR memSize, const char *function, APTR caller, APTR stack, struct ExecBase *SysBase)
{
    struct MemHeader *mh;
    APTR freeStart;
    IPTR freeSize;

    D(bug("[exec] InternalFreePooled(0x%p, %u)\n", memory, memSize));

    if (!memory || !memSize) return;

    /* Get the MemHeader pointer. It is stored right before our block. */
    freeStart = memory - sizeof(struct MemHeader *);
    freeSize  = memSize + sizeof(struct MemHeader *);
    mh = *((struct MemHeader **)freeStart);

    /* Check the walls first */
    freeStart = MungWall_Check(freeStart, freeSize, function, caller, stack, SysBase);
    if (PrivExecBase(SysBase)->IntFlags & EXECF_MungWall)
        freeSize += MUNGWALL_TOTAL_SIZE;

    /* Verify that the MemHeader pointer is correct */
    if ((mh->mh_Node.ln_Type != NT_MEMORY) ||
        (freeStart < mh->mh_Lower) || (freeStart + freeSize > mh->mh_Upper))
    {
        /*
         * Something is wrong.
         * TODO: the following should actually be printed as part of the alert.
         * In the future there should be some kind of "alert context". CPU alerts
         * (like illegal access) should remember the CPU context there. Memory manager
         * alerts (like this one) should remember some information of their own.
         */
        bug("[MM] Pool manager error\n");
        bug("[MM] Attempt to free %u bytes at 0x%p\n", memSize, memory);
        bug("[MM] The chunk does not belong to a pool\n");

        Alert(AN_BadFreeAddr);
    }
    else
    {
        struct ProtectedPool *pool = (struct ProtectedPool *)mh->mh_Node.ln_Name;
        IPTR size;

        if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
        {
            ObtainSemaphore(&pool->sem);
        }

        size = mh->mh_Upper - mh->mh_Lower;
        D(bug("[FreePooled] Allocated from puddle 0x%p, size %u\n", mh, size));

        /* Free the memory. */
        stdDealloc(mh, freeStart, freeSize, SysBase);
        D(bug("[FreePooled] Deallocated chunk, %u free bytes in the puddle\n", mh->mh_Free));

        /* Is this MemHeader completely free now? */
        if (mh->mh_Free == size)
        {
            D(bug("[FreePooled] Puddle is empty, giving back to the system\n"));

            /* Yes. Remove it from the list. */
            Remove(&mh->mh_Node);
            /* And free it. */
            FreeMemHeader(mh, SysBase);
        }
        /* All done. */

        if (pool->pool.Requirements & MEMF_SEM_PROTECTED)
        {
            ReleaseSemaphore(&pool->sem);
        }
    }
}
ULONG checkMemHandlers(struct checkMemHandlersState *cmhs, struct ExecBase *SysBase)
{
    struct Node      *tmp;
    struct Interrupt *lmh;

    if (cmhs->cmhs_Data.memh_RequestFlags & MEMF_NO_EXPUNGE)
        return MEM_DID_NOTHING;

    /* In order to keep things clean, we must run in a single thread */
    ObtainSemaphore(&PrivExecBase(SysBase)->LowMemSem);

    /*
     * Loop over the low memory handlers. Handlers can remove
     * themselves from the list while being invoked, thus
     * we need to be careful!
     */
    for (lmh = (struct Interrupt *)cmhs->cmhs_CurNode;
         (tmp = lmh->is_Node.ln_Succ);
         lmh = (struct Interrupt *)(cmhs->cmhs_CurNode = tmp))
    {
        ULONG ret;

        ret = AROS_UFC3 (LONG, lmh->is_Code,
                  AROS_UFCA(struct MemHandlerData *, &cmhs->cmhs_Data, A0),
                  AROS_UFCA(APTR, lmh->is_Data, A1),
                  AROS_UFCA(struct ExecBase *, SysBase, A6)
              );

        if (ret == MEM_TRY_AGAIN)
        {
            /* The MemHandler said it did something. Try again. */
            /* Is there any program that depends on this flag??? */
            cmhs->cmhs_Data.memh_Flags |= MEMHF_RECYCLE;

            ReleaseSemaphore(&PrivExecBase(SysBase)->LowMemSem);
            return MEM_TRY_AGAIN;
        }
        else
            /* Nothing more to expect from this handler. */
            cmhs->cmhs_Data.memh_Flags &= ~MEMHF_RECYCLE;
    }

    ReleaseSemaphore(&PrivExecBase(SysBase)->LowMemSem);
    return MEM_DID_NOTHING;
}