Register nameservers dynamically instead of writing them to disk. It is
[AROS.git] / rom / exec / memory_nommu.c
blob2714d0de276e8aa52b9bc92baa06f64d956c31c8
1 /*
2 Copyright © 1995-2011, The AROS Development Team. All rights reserved.
3 $Id$
5 Desc: System memory allocator for MMU-less systems.
6 Used also as boot-time memory allocator on systems with MMU.
7 Lang: english
8 */
10 #include <aros/debug.h>
11 #include <exec/execbase.h>
12 #include <exec/memory.h>
13 #include <proto/exec.h>
15 #include <string.h>
17 #include "exec_intern.h"
18 #include "exec_util.h"
19 #include "memory.h"
21 APTR nommu_AllocMem(IPTR byteSize, ULONG flags, struct TraceLocation *loc, struct ExecBase *SysBase)
23 APTR res = NULL;
24 struct MemHeader *mh;
25 ULONG requirements = flags & MEMF_PHYSICAL_MASK;
27 /* Protect memory list against other tasks */
28 MEM_LOCK;
30 /* Loop over MemHeader structures */
31 ForeachNode(&SysBase->MemList, mh)
34 * Check for the right requirements and enough free memory.
35 * The requirements are OK if there's no bit in the
36 * 'attributes' that isn't set in the 'mh->mh_Attributes'.
38 if ((requirements & ~mh->mh_Attributes)
39 || mh->mh_Free < byteSize)
40 continue;
42 res = stdAlloc(mh, byteSize, flags, loc, SysBase);
43 if (res)
44 break;
47 MEM_UNLOCK;
49 return res;
/*
 * Allocate 'byteSize' bytes at the fixed address 'location'.
 * Returns the chunk-aligned start of the allocation, or NULL if no
 * MemHeader covers the requested range or the range is not free.
 */
APTR nommu_AllocAbs(APTR location, IPTR byteSize, struct ExecBase *SysBase)
{
    struct MemHeader *mh;
    APTR ret = NULL;
    APTR endlocation = location + byteSize;

    /* Protect the memory list from access by other tasks. */
    MEM_LOCK;

    /* Loop over MemHeader structures: find the one covering [location, endlocation). */
    ForeachNode(&SysBase->MemList, mh)
    {
        if (mh->mh_Lower <= location && mh->mh_Upper >= endlocation)
            break;
    }

    /* If no header was found which matched the requirements, just give up.
       (ln_Succ is NULL only when ForeachNode ran off the end of the list.) */
    if (mh->mh_Node.ln_Succ)
    {
        struct MemChunk *p1, *p2, *p3, *p4;

        /* Align size to the requirements: grow the request by the bytes lost
           when 'location' is rounded down to a MemChunk boundary, then round
           the total up to a multiple of MEMCHUNK_TOTAL. */
        byteSize += (IPTR)location&(MEMCHUNK_TOTAL - 1);
        byteSize = (byteSize + MEMCHUNK_TOTAL-1) & ~(MEMCHUNK_TOTAL-1);

        /* Align the location as well */
        location=(APTR)((IPTR)location & ~(MEMCHUNK_TOTAL-1));

        /* Start and end(+1) of the block */
        p3=(struct MemChunk *)location;
        p4=(struct MemChunk *)((UBYTE *)p3+byteSize);

        /*
            The free memory list is only single linked, i.e. to remove
            elements from the list I need the node's predessor. For the
            first element I can use freeList->mh_First instead of a real
            predecessor.
        */
        p1 = (struct MemChunk *)&mh->mh_First;
        p2 = p1->mc_Next;

        /* Follow the list to find a chunk with our memory. */
        while (p2 != NULL)
        {
#if !defined(NO_CONSISTENCY_CHECKS)
            /*
             * Memory list consistency checks.
             * 1. Check alignment restrictions
             */
            if (((IPTR)p2|(IPTR)p2->mc_Bytes) & (MEMCHUNK_TOTAL-1))
            {
                /* Only report if debug output is already available. */
                if (SysBase && SysBase->DebugAROSBase)
                {
                    bug("[MM] Chunk allocator error\n");
                    bug("[MM] Attempt to allocate %lu bytes at 0x%p from MemHeader 0x%p\n", byteSize, location, mh);
                    bug("[MM] Misaligned chunk at 0x%p (%u bytes)\n", p2, p2->mc_Bytes);

                    Alert(AN_MemoryInsane|AT_DeadEnd);
                }
                break;
            }

            /* 2. Check against overlapping blocks */
            if (p2->mc_Next && ((UBYTE *)p2 + p2->mc_Bytes >= (UBYTE *)p2->mc_Next))
            {
                if (SysBase && SysBase->DebugAROSBase)
                {
                    bug("[MM] Chunk allocator error\n");
                    bug("[MM] Attempt to allocate %lu bytes at 0x%p from MemHeader 0x%p\n", byteSize, location, mh);
                    bug("[MM] Overlapping chunks 0x%p (%u bytes) and 0x%p (%u bytes)\n", p2, p2->mc_Bytes, p2->mc_Next, p2->mc_Next->mc_Bytes);

                    Alert(AN_MemoryInsane|AT_DeadEnd);
                }
                break;
            }
#endif

            /* Found a chunk that fits? The free chunk must fully cover [p3, p4). */
            if((UBYTE *)p2+p2->mc_Bytes>=(UBYTE *)p4&&p2<=p3)
            {
                /* Check if there's memory left at the end. */
                if((UBYTE *)p2+p2->mc_Bytes!=(UBYTE *)p4)
                {
                    /* Yes. Add it to the list */
                    p4->mc_Next = p2->mc_Next;
                    p4->mc_Bytes = (UBYTE *)p2+p2->mc_Bytes-(UBYTE *)p4;
                    p2->mc_Next = p4;
                }

                /* Check if there's memory left at the start. */
                if(p2!=p3)
                    /* Yes. Adjust the size */
                    p2->mc_Bytes=(UBYTE *)p3-(UBYTE *)p2;
                else
                    /* No. Skip the old chunk (unlink it via the predecessor) */
                    p1->mc_Next=p2->mc_Next;

                /* Adjust free memory count */
                mh->mh_Free-=byteSize;

                /* Return the memory */
                ret = p3;
                break;
            }

            /* goto next chunk */
            p1=p2;
            p2=p2->mc_Next;
        }
    }

    MEM_UNLOCK;

    return ret;
}
168 void nommu_FreeMem(APTR memoryBlock, IPTR byteSize, struct TraceLocation *loc, struct ExecBase *SysBase)
170 struct MemHeader *mh;
171 APTR blockEnd;
173 /* It is legal to free zero bytes */
174 if (!byteSize)
175 return;
177 blockEnd = memoryBlock + byteSize;
179 /* Protect the memory list from access by other tasks. */
180 MEM_LOCK;
182 ForeachNode(&SysBase->MemList, mh)
184 /* Test if the memory belongs to this MemHeader. */
185 if (mh->mh_Lower > memoryBlock || mh->mh_Upper < blockEnd)
186 continue;
188 stdDealloc(mh, memoryBlock, byteSize, loc, SysBase);
190 MEM_UNLOCK;
191 ReturnVoid ("nommu_FreeMem");
194 MEM_UNLOCK;
196 #if !defined(NO_CONSISTENCY_CHECKS)
197 /* Some memory that didn't fit into any MemHeader? */
198 bug("[MM] Chunk allocator error\n");
199 bug("[MM] Attempt to free %u bytes at 0x%p\n", byteSize, memoryBlock);
200 bug("[MM] The block does not belong to any MemHeader\n");
202 Alert(AN_BadFreeAddr);
203 #endif
205 ReturnVoid ("nommu_FreeMem");
208 IPTR nommu_AvailMem(ULONG attributes, struct ExecBase *SysBase)
210 IPTR ret = 0;
211 struct MemHeader *mh;
212 ULONG physFlags = attributes & MEMF_PHYSICAL_MASK;
214 D(bug("[MM] nommu_AvailMem(0x%08X)\n", attributes));
215 D(bug("[MM] physical memory flags: 0x%08X\n", physFlags));
217 /* Nobody else should access the memory lists now. */
218 MEM_LOCK_SHARED;
220 ForeachNode(&SysBase->MemList, mh)
222 D(bug("[MM] Checking MemHeader 0x%p\n", mh));
225 * The current memheader is OK if there's no bit in the
226 * 'physFlags' that isn't set in the 'mh->mh_Attributes'.
228 if (physFlags & ~mh->mh_Attributes)
230 D(bug("[MM] Skipping (mh_Attributes = 0x%08X\n", mh->mh_Attributes));
231 continue;
234 /* Find largest chunk? */
235 if (attributes & MEMF_LARGEST)
238 Yes. Follow the list of MemChunks and set 'ret' to
239 each value that is bigger than all previous ones.
241 struct MemChunk *mc;
243 for (mc = mh->mh_First; mc; mc = mc->mc_Next)
245 #if !defined(NO_CONSISTENCY_CHECKS)
247 * Do some constistency checks:
248 * 1. All MemChunks must be aligned to MEMCHUNK_TOTAL.
250 if (((IPTR)mc | mc->mc_Bytes) & (MEMCHUNK_TOTAL-1))
252 bug("[MM] Chunk allocator error in MemHeader 0x%p\n", mh);
253 bug("[MM] Misaligned chunk at 0x%p (%u bytes)\n", mc, mc->mc_Bytes);
255 Alert(AN_MemoryInsane|AT_DeadEnd);
257 /* 2. The end (+1) of the current MemChunk must be lower than the start of the next one. */
258 if (mc->mc_Next && ((UBYTE *)mc + mc->mc_Bytes >= (UBYTE *)mc->mc_Next))
260 bug("[MM] Chunk allocator error in MemHeader 0x%p\n");
261 bug("[MM] Overlapping chunks 0x%p (%u bytes) and 0x%p (%u bytes)\n", mc, mc->mc_Bytes, mc->mc_Next, mc->mc_Next->mc_Bytes);
263 Alert(AN_MemoryInsane|AT_DeadEnd);
265 #endif
266 if (mc->mc_Bytes>ret)
267 ret=mc->mc_Bytes;
270 else if (attributes & MEMF_TOTAL)
271 /* Determine total size. */
272 ret += (IPTR)mh->mh_Upper - (IPTR)mh->mh_Lower;
273 else
274 /* Sum up free memory. */
275 ret += mh->mh_Free;
278 /* All done */
279 MEM_UNLOCK;
281 return ret;