If MEMF_REVERSE is specified, it is documented that the memlist should be traversed in...
[AROS.git] / rom / exec / memory_nommu.c
blob 3fcdc0261c2ba6ffa4372fd41df59a7f806713bb
/*
    Copyright © 1995-2011, The AROS Development Team. All rights reserved.
    $Id$

    Desc: System memory allocator for MMU-less systems.
          Used also as boot-time memory allocator on systems with MMU.
    Lang: english
*/

#include <aros/debug.h>
#include <exec/execbase.h>
#include <exec/memory.h>
#include <exec/memheaderext.h>
#include <proto/exec.h>

#include <string.h>

#include "exec_intern.h"
#include "exec_util.h"
#include "memory.h"
APTR nommu_AllocMem(IPTR byteSize, ULONG flags, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    APTR res = NULL;
    struct MemHeader *mh, *mhn;
    ULONG requirements = flags & MEMF_PHYSICAL_MASK;

    /* Protect memory list against other tasks */
    MEM_LOCK;

    /* MEMF_REVERSE starts the walk at the tail of the memory list */
    if (flags & MEMF_REVERSE)
        mhn = (struct MemHeader *)GetTail(&SysBase->MemList);
    else
        mhn = (struct MemHeader *)GetHead(&SysBase->MemList);

    /* Loop over MemHeader structures */
    while (mhn)
    {
        mh = mhn;

        if (flags & MEMF_REVERSE)
            mhn = (struct MemHeader *)(((struct Node *)mh)->ln_Pred);
        else
            mhn = (struct MemHeader *)(((struct Node *)mh)->ln_Succ);

        /*
         * Check for the right requirements and enough free memory.
         * The requirements are OK if there's no bit in
         * 'requirements' that isn't set in 'mh->mh_Attributes'.
         */
        if ((requirements & ~mh->mh_Attributes)
            || mh->mh_Free < byteSize)
            continue;

        if (IsManagedMem(mh))
        {
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

            if (mhe->mhe_Alloc)
                res = mhe->mhe_Alloc(mhe, byteSize, &flags);
        }
        else
        {
            res = stdAlloc(mh, mhac_GetSysCtx(mh, SysBase), byteSize, flags, loc, SysBase);
        }

        if (res)
            break;
    }

    MEM_UNLOCK;

    return res;
}
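
/*
 * Illustrative sketch, not part of the original file: MEMF_REVERSE only
 * changes which end of SysBase->MemList the walk above starts from, so the
 * allocation is preferred from the most recently added MemHeader. A caller
 * would normally request this through exec.library's public AllocMem(),
 * for example:
 *
 *     APTR block = AllocMem(4096, MEMF_ANY | MEMF_REVERSE);
 *     if (block != NULL)
 *     {
 *         ...use the 4 KiB block...
 *         FreeMem(block, 4096);
 *     }
 */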
APTR nommu_AllocAbs(APTR location, IPTR byteSize, struct ExecBase *SysBase)
{
    struct MemHeader *mh;
    APTR ret = NULL;
    APTR endlocation = location + byteSize;

    /* Protect the memory list from access by other tasks. */
    MEM_LOCK;

    /* Loop over MemHeader structures */
    ForeachNode(&SysBase->MemList, mh)
    {
        if (IsManagedMem(mh))
        {
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

            if (mhe->mhe_InBounds(mhe, location, endlocation))
            {
                if (mhe->mhe_AllocAbs)
                {
                    void *ret = mhe->mhe_AllocAbs(mhe, byteSize, location);

                    MEM_UNLOCK;

                    return ret;
                }
            }
        }
        else if (mh->mh_Lower <= location && mh->mh_Upper >= endlocation)
            break;
    }

    /*
     * If no header was found which matched the requirements, mh now points
     * at the list's tail sentinel (ln_Succ == NULL) and we simply give up.
     */
    if (mh->mh_Node.ln_Succ)
    {
        struct MemChunk *p1, *p2, *p3, *p4;

        /* Align size to the requirements */
        byteSize += (IPTR)location & (MEMCHUNK_TOTAL - 1);
        byteSize  = (byteSize + MEMCHUNK_TOTAL - 1) & ~(MEMCHUNK_TOTAL - 1);

        /* Align the location as well */
        location = (APTR)((IPTR)location & ~(MEMCHUNK_TOTAL - 1));

        /* Start and end (+1) of the block */
        p3 = (struct MemChunk *)location;
        p4 = (struct MemChunk *)((UBYTE *)p3 + byteSize);

        /*
         * The free memory list is only singly linked, i.e. to remove
         * elements from the list I need the node's predecessor. For the
         * first element I can use freeList->mh_First instead of a real
         * predecessor.
         */
        p1 = (struct MemChunk *)&mh->mh_First;
        p2 = p1->mc_Next;

        /* Follow the list to find a chunk with our memory. */
        while (p2 != NULL)
        {
#if !defined(NO_CONSISTENCY_CHECKS)
            /*
             * Memory list consistency checks.
             * 1. Check alignment restrictions
             */
            if (((IPTR)p2 | (IPTR)p2->mc_Bytes) & (MEMCHUNK_TOTAL - 1))
            {
                if (SysBase && SysBase->DebugAROSBase)
                {
                    bug("[MM] Chunk allocator error\n");
                    bug("[MM] Attempt to allocate %lu bytes at 0x%p from MemHeader 0x%p\n", byteSize, location, mh);
                    bug("[MM] Misaligned chunk at 0x%p (%u bytes)\n", p2, p2->mc_Bytes);

                    Alert(AN_MemoryInsane|AT_DeadEnd);
                }
                break;
            }

            /* 2. Check against overlapping blocks */
            if (p2->mc_Next && ((UBYTE *)p2 + p2->mc_Bytes >= (UBYTE *)p2->mc_Next))
            {
                if (SysBase && SysBase->DebugAROSBase)
                {
                    bug("[MM] Chunk allocator error\n");
                    bug("[MM] Attempt to allocate %lu bytes at 0x%p from MemHeader 0x%p\n", byteSize, location, mh);
                    bug("[MM] Overlapping chunks 0x%p (%u bytes) and 0x%p (%u bytes)\n", p2, p2->mc_Bytes, p2->mc_Next, p2->mc_Next->mc_Bytes);

                    Alert(AN_MemoryInsane|AT_DeadEnd);
                }
                break;
            }
#endif

            /* Found a chunk that fits? */
            if ((UBYTE *)p2 + p2->mc_Bytes >= (UBYTE *)p4 && p2 <= p3)
            {
                /* Since AllocAbs allocations never allocate/update a ctx, they need to clear it if it exists */
                mhac_ClearSysCtx(mh, SysBase);

                /* Check if there's memory left at the end. */
                if ((UBYTE *)p2 + p2->mc_Bytes != (UBYTE *)p4)
                {
                    /* Yes. Add it to the list */
                    p4->mc_Next  = p2->mc_Next;
                    p4->mc_Bytes = (UBYTE *)p2 + p2->mc_Bytes - (UBYTE *)p4;
                    p2->mc_Next  = p4;
                }

                /* Check if there's memory left at the start. */
                if (p2 != p3)
                    /* Yes. Adjust the size */
                    p2->mc_Bytes = (UBYTE *)p3 - (UBYTE *)p2;
                else
                    /* No. Skip the old chunk */
                    p1->mc_Next = p2->mc_Next;

                /* Adjust free memory count */
                mh->mh_Free -= byteSize;

                /* Return the memory */
                ret = p3;
                break;
            }

            /* goto next chunk */
            p1 = p2;
            p2 = p2->mc_Next;
        }
    }

    MEM_UNLOCK;

    return ret;
}
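
/*
 * Sketch of the split performed by nommu_AllocAbs() above (illustrative,
 * not part of the original file). A free chunk starting at p2 that covers
 * the requested, MEMCHUNK_TOTAL-aligned range [p3, p4) is cut into up to
 * three pieces:
 *
 *     p2               p3              p4        p2 + p2->mc_Bytes
 *     |-- stays free --|-- allocated --|-- new free chunk p4 --|
 *
 * If p2 == p3 there is no leading piece and p2 is unlinked via its
 * predecessor p1; if p4 reaches the end of the chunk, no trailing piece
 * is created.
 */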
void nommu_FreeMem(APTR memoryBlock, IPTR byteSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeader *mh;
    APTR blockEnd;

    /* It is legal to free zero bytes */
    if (!byteSize)
        return;

    blockEnd = memoryBlock + byteSize;

    /* Protect the memory list from access by other tasks. */
    MEM_LOCK;

    ForeachNode(&SysBase->MemList, mh)
    {
        if (IsManagedMem(mh))
        {
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

            /* Test if the memory belongs to this MemHeader. */
            if (!mhe->mhe_InBounds(mhe, memoryBlock, blockEnd))
                continue;

            if (mhe->mhe_Free)
                mhe->mhe_Free(mhe, memoryBlock, byteSize);
        }
        else
        {
            /* Test if the memory belongs to this MemHeader. */
            if (mh->mh_Lower > memoryBlock || mh->mh_Upper < blockEnd)
                continue;

            stdDealloc(mh, mhac_GetSysCtx(mh, SysBase), memoryBlock, byteSize, loc, SysBase);
        }

        MEM_UNLOCK;
        ReturnVoid ("nommu_FreeMem");
    }

    MEM_UNLOCK;

#if !defined(NO_CONSISTENCY_CHECKS)
    /* Some memory that didn't fit into any MemHeader? */
    bug("[MM] Chunk allocator error\n");
    bug("[MM] Attempt to free %lu bytes at 0x%p\n", byteSize, memoryBlock);
    bug("[MM] The block does not belong to any MemHeader\n");

    Alert(AN_BadFreeAddr);
#endif

    ReturnVoid ("nommu_FreeMem");
}
IPTR nommu_AvailMem(ULONG attributes, struct ExecBase *SysBase)
{
    IPTR ret = 0;
    struct MemHeader *mh;
    ULONG physFlags = attributes & MEMF_PHYSICAL_MASK;

    D(bug("[MM] nommu_AvailMem(0x%08X)\n", attributes));
    D(bug("[MM] physical memory flags: 0x%08X\n", physFlags));

    /* Nobody else should access the memory lists now. */
    MEM_LOCK_SHARED;

    ForeachNode(&SysBase->MemList, mh)
    {
        D(bug("[MM] Checking MemHeader 0x%p\n", mh));

        /*
         * The current MemHeader is OK if there's no bit in
         * 'physFlags' that isn't set in 'mh->mh_Attributes'.
         */
        if (physFlags & ~mh->mh_Attributes)
        {
            D(bug("[MM] Skipping (mh_Attributes = 0x%08X)\n", mh->mh_Attributes));
            continue;
        }

        if (IsManagedMem(mh))
        {
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

            if (mhe->mhe_Avail)
            {
                IPTR val = mhe->mhe_Avail(mhe, attributes);

                if (attributes & MEMF_LARGEST)
                {
                    if (val > ret)
                        ret = val;
                }
                else
                    ret += val;
            }
            continue;
        }

        /* Find largest chunk? */
        if (attributes & MEMF_LARGEST)
        {
            /*
             * Yes. Follow the list of MemChunks and set 'ret' to
             * each value that is bigger than all previous ones.
             */
            struct MemChunk *mc;

            for (mc = mh->mh_First; mc; mc = mc->mc_Next)
            {
#if !defined(NO_CONSISTENCY_CHECKS)
                /*
                 * Do some consistency checks:
                 * 1. All MemChunks must be aligned to MEMCHUNK_TOTAL.
                 */
                if (((IPTR)mc | mc->mc_Bytes) & (MEMCHUNK_TOTAL - 1))
                {
                    bug("[MM] Chunk allocator error in MemHeader 0x%p\n", mh);
                    bug("[MM] Misaligned chunk at 0x%p (%u bytes)\n", mc, mc->mc_Bytes);

                    Alert(AN_MemoryInsane|AT_DeadEnd);
                }

                /* 2. The end (+1) of the current MemChunk must be lower than the start of the next one. */
                if (mc->mc_Next && ((UBYTE *)mc + mc->mc_Bytes >= (UBYTE *)mc->mc_Next))
                {
                    bug("[MM] Chunk allocator error in MemHeader 0x%p\n", mh);
                    bug("[MM] Overlapping chunks 0x%p (%u bytes) and 0x%p (%u bytes)\n", mc, mc->mc_Bytes, mc->mc_Next, mc->mc_Next->mc_Bytes);

                    Alert(AN_MemoryInsane|AT_DeadEnd);
                }
#endif
                if (mc->mc_Bytes > ret)
                    ret = mc->mc_Bytes;
            }
        }
        else if (attributes & MEMF_TOTAL)
            /* Determine total size. */
            ret += (IPTR)mh->mh_Upper - (IPTR)mh->mh_Lower;
        else
            /* Sum up free memory. */
            ret += mh->mh_Free;
    }

    /* All done */
    MEM_UNLOCK;

    return ret;
}
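
/*
 * Illustrative usage sketch (assumption, not part of the original file): on
 * MMU-less systems exec.library's public AvailMem() is expected to land in
 * nommu_AvailMem(), so the three accounting modes above roughly correspond
 * to calls such as:
 *
 *     IPTR freeAny     = AvailMem(MEMF_ANY);                  // sum of mh_Free
 *     IPTR largestFast = AvailMem(MEMF_FAST | MEMF_LARGEST);  // biggest single chunk
 *     IPTR totalChip   = AvailMem(MEMF_CHIP | MEMF_TOTAL);    // mh_Upper - mh_Lower sums
 */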