/*
    Copyright © 1995-2011, The AROS Development Team. All rights reserved.
    $Id$

    Desc: System memory allocator for MMU-less systems.
          Used also as boot-time memory allocator on systems with MMU.
    Lang: english
*/

#include <aros/debug.h>
#include <exec/execbase.h>
#include <exec/memory.h>
#include <exec/memheaderext.h>
#include <proto/exec.h>

#include <string.h>

#include "exec_intern.h"
#include "exec_util.h"
#include "memory.h"
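
/*
 * Allocate 'byteSize' bytes that satisfy the MEMF_ requirements in 'flags'
 * from the first suitable MemHeader. Returns NULL if no header can provide
 * the memory.
 */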
APTR nommu_AllocMem(IPTR byteSize, ULONG flags, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    APTR res = NULL;
    struct MemHeader *mh;
    ULONG requirements = flags & MEMF_PHYSICAL_MASK;

    /* Protect memory list against other tasks */
    MEM_LOCK;

    /* Loop over MemHeader structures */
    ForeachNode(&SysBase->MemList, mh)
    {
        /*
         * Check for the right requirements and enough free memory.
         * The requirements are OK if there's no bit in 'requirements'
         * that isn't also set in 'mh->mh_Attributes'.
         */
        if ((requirements & ~mh->mh_Attributes)
            || mh->mh_Free < byteSize)
            continue;
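
        /*
         * Managed memory (MemHeaderExt) supplies its own allocator callback;
         * plain MemHeaders go through the generic chunk allocator stdAlloc().
         */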
        if (IsManagedMem(mh))
        {
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

            if (mhe->mhe_Alloc)
                res = mhe->mhe_Alloc(mhe, byteSize, &flags);
        }
        else
        {
            res = stdAlloc(mh, mhac_GetSysCtx(mh, SysBase), byteSize, flags, loc, SysBase);
        }

        if (res)
            break;
    }

    MEM_UNLOCK;

    return res;
}
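
/*
 * Allocate 'byteSize' bytes starting exactly at 'location'.
 * Returns NULL if that address range cannot be obtained.
 */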
APTR nommu_AllocAbs(APTR location, IPTR byteSize, struct ExecBase *SysBase)
{
    struct MemHeader *mh;
    APTR ret = NULL;
    APTR endlocation = location + byteSize;

    /* Protect the memory list from access by other tasks. */
    MEM_LOCK;

    /* Loop over MemHeader structures */
    ForeachNode(&SysBase->MemList, mh)
    {
        if (IsManagedMem(mh))
        {
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

            if (mhe->mhe_InBounds(mhe, location, endlocation))
            {
                if (mhe->mhe_AllocAbs)
                {
                    void *ret = mhe->mhe_AllocAbs(mhe, byteSize, location);

                    MEM_UNLOCK;

                    return ret;
                }
            }
        }
        else if (mh->mh_Lower <= location && mh->mh_Upper >= endlocation)
            break;
    }

    /* If no header was found which matched the requirements, just give up. */
    if (mh->mh_Node.ln_Succ)
    {
        struct MemChunk *p1, *p2, *p3, *p4;
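
        /*
         * p1/p2 walk the free chunk list as predecessor/current chunk,
         * while p3/p4 delimit the aligned block that will be carved out.
         */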
        /* Align size to the requirements */
        byteSize += (IPTR)location & (MEMCHUNK_TOTAL - 1);
        byteSize  = (byteSize + MEMCHUNK_TOTAL - 1) & ~(MEMCHUNK_TOTAL - 1);

        /* Align the location as well */
        location = (APTR)((IPTR)location & ~(MEMCHUNK_TOTAL - 1));
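
        /*
         * The grown, rounded-up size together with the rounded-down location
         * keeps the caller's range covered. For example, if MEMCHUNK_TOTAL
         * happened to be 16, a request for 20 bytes at 0x100A would become
         * 32 bytes at 0x1000, which still contains 0x100A..0x101D.
         */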
        /* Start and end(+1) of the block */
        p3 = (struct MemChunk *)location;
        p4 = (struct MemChunk *)((UBYTE *)p3 + byteSize);

        /*
         * The free memory list is only singly linked, i.e. to remove
         * elements from the list I need the node's predecessor. For the
         * first element I can use freeList->mh_First instead of a real
         * predecessor.
         */
        p1 = (struct MemChunk *)&mh->mh_First;
        p2 = p1->mc_Next;

        /* Follow the list to find a chunk with our memory. */
        while (p2 != NULL)
        {
#if !defined(NO_CONSISTENCY_CHECKS)
            /*
             * Memory list consistency checks.
             * 1. Check alignment restrictions
             */
            if (((IPTR)p2 | (IPTR)p2->mc_Bytes) & (MEMCHUNK_TOTAL - 1))
            {
                if (SysBase && SysBase->DebugAROSBase)
                {
                    bug("[MM] Chunk allocator error\n");
                    bug("[MM] Attempt to allocate %lu bytes at 0x%p from MemHeader 0x%p\n", byteSize, location, mh);
                    bug("[MM] Misaligned chunk at 0x%p (%u bytes)\n", p2, p2->mc_Bytes);

                    Alert(AN_MemoryInsane|AT_DeadEnd);
                }
                break;
            }

            /* 2. Check against overlapping blocks */
            if (p2->mc_Next && ((UBYTE *)p2 + p2->mc_Bytes >= (UBYTE *)p2->mc_Next))
            {
                if (SysBase && SysBase->DebugAROSBase)
                {
                    bug("[MM] Chunk allocator error\n");
                    bug("[MM] Attempt to allocate %lu bytes at 0x%p from MemHeader 0x%p\n", byteSize, location, mh);
                    bug("[MM] Overlapping chunks 0x%p (%u bytes) and 0x%p (%u bytes)\n", p2, p2->mc_Bytes, p2->mc_Next, p2->mc_Next->mc_Bytes);

                    Alert(AN_MemoryInsane|AT_DeadEnd);
                }
                break;
            }
#endif

            /* Found a chunk that fits? */
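            /*
             * It fits if the chunk starts at or before the aligned start
             * (p2 <= p3) and extends at least up to the aligned end (p4).
             */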
            if ((UBYTE *)p2 + p2->mc_Bytes >= (UBYTE *)p4 && p2 <= p3)
            {
                /* Since AllocAbs allocations never allocate/update a ctx, they need to clear it if it exists */
                mhac_ClearSysCtx(mh, SysBase);

                /* Check if there's memory left at the end. */
                if ((UBYTE *)p2 + p2->mc_Bytes != (UBYTE *)p4)
                {
                    /* Yes. Add it to the list */
                    p4->mc_Next  = p2->mc_Next;
                    p4->mc_Bytes = (UBYTE *)p2 + p2->mc_Bytes - (UBYTE *)p4;
                    p2->mc_Next  = p4;
                }

                /* Check if there's memory left at the start. */
                if (p2 != p3)
                    /* Yes. Adjust the size */
                    p2->mc_Bytes = (UBYTE *)p3 - (UBYTE *)p2;
                else
                    /* No. Skip the old chunk */
                    p1->mc_Next = p2->mc_Next;

                /* Adjust free memory count */
                mh->mh_Free -= byteSize;

                /* Return the memory */
                ret = p3;
                break;
            }

            /* goto next chunk */
            p1 = p2;
            p2 = p2->mc_Next;
        }
    }

    MEM_UNLOCK;

    return ret;
}
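
/*
 * Return 'byteSize' bytes starting at 'memoryBlock' to the MemHeader
 * they were allocated from.
 */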
void nommu_FreeMem(APTR memoryBlock, IPTR byteSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MemHeader *mh;
    APTR blockEnd;

    /* It is legal to free zero bytes */
    if (!byteSize)
        return;

    blockEnd = memoryBlock + byteSize;

    /* Protect the memory list from access by other tasks. */
    MEM_LOCK;

    ForeachNode(&SysBase->MemList, mh)
    {
        if (IsManagedMem(mh))
        {
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

            /* Test if the memory belongs to this MemHeader. */
            if (!mhe->mhe_InBounds(mhe, memoryBlock, blockEnd))
                continue;

            if (mhe->mhe_Free)
                mhe->mhe_Free(mhe, memoryBlock, byteSize);
        }
        else
        {
            /* Test if the memory belongs to this MemHeader. */
            if (mh->mh_Lower > memoryBlock || mh->mh_Upper < blockEnd)
                continue;

            stdDealloc(mh, mhac_GetSysCtx(mh, SysBase), memoryBlock, byteSize, loc, SysBase);
        }
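
        /* The owning MemHeader has handled the block; unlock and return. */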
        MEM_UNLOCK;
        ReturnVoid ("nommu_FreeMem");
    }

    MEM_UNLOCK;

#if !defined(NO_CONSISTENCY_CHECKS)
    /* Some memory that didn't fit into any MemHeader? */
    bug("[MM] Chunk allocator error\n");
    bug("[MM] Attempt to free %u bytes at 0x%p\n", byteSize, memoryBlock);
    bug("[MM] The block does not belong to any MemHeader\n");

    Alert(AN_BadFreeAddr);
#endif

    ReturnVoid ("nommu_FreeMem");
}
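
/*
 * Return the amount of memory matching 'attributes': the largest free chunk
 * (MEMF_LARGEST), the total managed size (MEMF_TOTAL), or the free amount.
 */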
IPTR nommu_AvailMem(ULONG attributes, struct ExecBase *SysBase)
{
    IPTR ret = 0;
    struct MemHeader *mh;
    ULONG physFlags = attributes & MEMF_PHYSICAL_MASK;

    D(bug("[MM] nommu_AvailMem(0x%08X)\n", attributes));
    D(bug("[MM] physical memory flags: 0x%08X\n", physFlags));

    /* Nobody else should access the memory lists now. */
    MEM_LOCK_SHARED;

    ForeachNode(&SysBase->MemList, mh)
    {
        D(bug("[MM] Checking MemHeader 0x%p\n", mh));

        /*
         * The current memheader is OK if there's no bit in 'physFlags'
         * that isn't also set in 'mh->mh_Attributes'.
         */
        if (physFlags & ~mh->mh_Attributes)
        {
            D(bug("[MM] Skipping (mh_Attributes = 0x%08X)\n", mh->mh_Attributes));
            continue;
        }

        if (IsManagedMem(mh))
        {
            struct MemHeaderExt *mhe = (struct MemHeaderExt *)mh;

            if (mhe->mhe_Avail)
            {
                IPTR val = mhe->mhe_Avail(mhe, attributes);

                if (attributes & MEMF_LARGEST)
                {
                    if (val > ret)
                        ret = val;
                }
                else
                    ret += val;

                continue;
            }
        }
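
        /*
         * Plain MemHeader: MEMF_LARGEST walks the chunk list for the biggest
         * free chunk, MEMF_TOTAL sums up the header's full size, otherwise
         * the free byte count is accumulated.
         */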
        /* Find largest chunk? */
        if (attributes & MEMF_LARGEST)
        {
            /*
             * Yes. Follow the list of MemChunks and set 'ret' to
             * each value that is bigger than all previous ones.
             */
            struct MemChunk *mc;

            for (mc = mh->mh_First; mc; mc = mc->mc_Next)
            {
#if !defined(NO_CONSISTENCY_CHECKS)
                /*
                 * Do some consistency checks:
                 * 1. All MemChunks must be aligned to MEMCHUNK_TOTAL.
                 */
                if (((IPTR)mc | mc->mc_Bytes) & (MEMCHUNK_TOTAL - 1))
                {
                    bug("[MM] Chunk allocator error in MemHeader 0x%p\n", mh);
                    bug("[MM] Misaligned chunk at 0x%p (%u bytes)\n", mc, mc->mc_Bytes);

                    Alert(AN_MemoryInsane|AT_DeadEnd);
                }

                /* 2. The end (+1) of the current MemChunk must be lower than the start of the next one. */
                if (mc->mc_Next && ((UBYTE *)mc + mc->mc_Bytes >= (UBYTE *)mc->mc_Next))
                {
                    bug("[MM] Chunk allocator error in MemHeader 0x%p\n", mh);
                    bug("[MM] Overlapping chunks 0x%p (%u bytes) and 0x%p (%u bytes)\n", mc, mc->mc_Bytes, mc->mc_Next, mc->mc_Next->mc_Bytes);

                    Alert(AN_MemoryInsane|AT_DeadEnd);
                }
#endif
                if (mc->mc_Bytes > ret)
                    ret = mc->mc_Bytes;
            }
        }
        else if (attributes & MEMF_TOTAL)
            /* Determine total size. */
            ret += (IPTR)mh->mh_Upper - (IPTR)mh->mh_Lower;
        else
            /* Sum up free memory. */
            ret += mh->mh_Free;
    }

    /* All done */
    MEM_UNLOCK;

    return ret;
}