/*
 * Functions for dealing with the Multiboot memory map.
 * This file overrides the basic MemHeader creation functions in rom/kernel,
 * because if you have a memory map you don't need them.
 * This code builds a fully functional set of MemHeaders and MemChunks
 * based on the memory map contents and the physical breakout described in
 * the array of MemRegion structures.
 */
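
/*
 * For orientation: this code relies only on a handful of MemRegion fields
 * (start, end, name, pri, flags). The authoritative definition lives in
 * kernel_mmap.h; the sketch below is an assumption inferred from how the
 * fields are used here, not a copy of that header:
 *
 *     struct MemRegion
 *     {
 *         IPTR  start;   // first physical address of the region
 *         IPTR  end;     // end of the region (exclusive)
 *         char *name;    // copied into mh_Node.ln_Name
 *         BYTE  pri;     // copied into mh_Node.ln_Pri
 *         ULONG flags;   // copied into mh_Attributes (MEMF_... flags)
 *     };
 */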

#include <aros/macros.h>
#include <aros/multiboot.h>
#include <exec/lists.h>
#include <exec/memory.h>

#include "kernel_base.h"
#include "kernel_debug.h"
#include "kernel_mmap.h"

#define D(x)

/*
 * Append a single chunk to a MemHeader.
 * If the MemHeader address is not set yet, a MemHeader will be created in this
 * chunk with the parameters specified in the MemRegion structure.
 * Returns the last MemChunk in the chain, for linking.
 */
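
/*
 * Illustrative in-place layout when the MemHeader has to be created here
 * (offsets are approximate; the exact positions depend on the alignment
 * rounding applied below):
 *
 *   mh_Start (-> mh_Lower)    start (aligned)                end (-> mh_Upper)
 *   | reserved/preallocated | struct MemHeader | MemChunk + free bytes |
 */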

static struct MemChunk *krnAddMemChunk(struct MemHeader **mhPtr, struct MemChunk *prev, IPTR start, IPTR end,
                                       IPTR mh_Start, const struct MemRegion *reg)
{
    struct MemChunk *mc;

    if (*mhPtr == NULL)
    {
        /* Align the start address - who knows... */
        start = AROS_ROUNDUP2(start, sizeof(IPTR));

        /* Ignore the chunk if it's too small to place the MemHeader there */
        if (start > end)
            return NULL;
        if (end - start < sizeof(struct MemHeader))
            return NULL;

        /* Create the MemHeader if it is not there yet */
        *mhPtr = (struct MemHeader *)start;
        start += sizeof(struct MemHeader);

        (*mhPtr)->mh_Node.ln_Name = reg->name;
        (*mhPtr)->mh_Node.ln_Type = NT_MEMORY;
        (*mhPtr)->mh_Node.ln_Pri  = reg->pri;
        (*mhPtr)->mh_Attributes   = reg->flags;
        (*mhPtr)->mh_Lower        = (APTR)mh_Start;
        (*mhPtr)->mh_First        = NULL;   /* We don't have any MemChunk yet */
        (*mhPtr)->mh_Free         = 0;

        /* The next MemChunk will be linked to our MemHeader */
        prev = (struct MemChunk *)&(*mhPtr)->mh_First;
    }

    (*mhPtr)->mh_Upper = (APTR)end;

    /* A MemChunk must start and end on aligned addresses */
    start = AROS_ROUNDUP2(start, MEMCHUNK_TOTAL);
    end   = AROS_ROUNDDOWN2(end, MEMCHUNK_TOTAL);

    /* If there is not enough space, skip this chunk */
    if (start > end)
        return prev;
    if (end - start < MEMCHUNK_TOTAL)
        return prev;

    mc = (struct MemChunk *)start;
    mc->mc_Next  = NULL;
    mc->mc_Bytes = end - start;

    /* Append this chunk to the MemHeader */
    prev->mc_Next = mc;
    (*mhPtr)->mh_Free += mc->mc_Bytes;

    return mc;
}

/*
 * Build conventional memory lists out of the Multiboot memory map structure.
 * All MemHeaders are added to the specified list in the order in which they
 * were created, not in priority order.
 * The memory breakup is specified by an array of MemRegion structures.
 *
 * The algorithm is the following:
 * 1. Traverse the MemRegion array. For each region repeat all of the following:
 * 2. Set the starting address (cur_start) to the beginning of the region.
 * 3. Traverse the entire memory map, locating the lowest fitting chunk.
 * 4. If we have found a chunk in (3), add it to the memory list.
 * 5. If there is a gap between this chunk and the previously added one, also start a new MemHeader.
 * 6. Set cur_start to the end of this chunk and repeat the process from step (3).
 *
 * This effectively sorts memory map entries in ascending order and merges adjacent chunks into single MemHeaders.
 */
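
/*
 * A hypothetical example (addresses are illustrative only): with a region
 * covering 0x00100000 - 0x80000000 and a memory map reporting RAM at
 * 0x00100000 - 0x01000000 and at 0x01000000 - 0x7FFF0000, the two adjacent
 * entries end up as MemChunks of a single MemHeader. If the second entry
 * instead started at 0x02000000, the gap would close the first MemHeader
 * and open a new one.
 */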

void mmap_InitMemory(struct mb_mmap *mmap_addr, unsigned long mmap_len, struct MinList *memList,
                     IPTR klo, IPTR khi, IPTR reserve, const struct MemRegion *reg)
{
    while (reg->name)
    {
        struct MemHeader *mh = NULL;
        struct MemChunk  *mc = NULL;
        IPTR phys_start = ~0;
        IPTR cur_start  = reg->start;
        IPTR chunk_start;
        IPTR chunk_end;
        unsigned int chunk_type;

        D(nbug("[MMAP] Processing region 0x%p - 0x%p (%s)...\n", reg->start, reg->end, reg->name));

        do
        {
            struct mb_mmap *mmap = mmap_addr;
            unsigned long len    = mmap_len;

            chunk_start = ~0;
            chunk_end   = 0;
            chunk_type  = 0;

            while (len >= sizeof(struct mb_mmap))
            {
                IPTR start = mmap->addr;
                IPTR end   = 0;

#ifdef __i386__
                /* We are on i386, ignore high memory */
                if (mmap->addr_high)
                {
                    /* Go to the next chunk */
                    len -= mmap->size + 4;
                    mmap = (struct mb_mmap *)(mmap->size + (IPTR)mmap + 4);

                    continue;
                }

                if (mmap->len_high)
                    end = 0x80000000;
                else
#endif
                end = mmap->addr + mmap->len;

                if ((cur_start < end) && (reg->end > start))
                {
                    if (cur_start > start)
                        start = cur_start;
                    if (reg->end < end)
                        end = reg->end;

                    if (start < chunk_start)
                    {
                        chunk_start = start;
                        chunk_end   = end;
                        chunk_type  = mmap->type;

                        if (chunk_start == cur_start)
                        {
                            /*
                             * Terminate the search early if the found chunk is at the beginning
                             * of the region under consideration. There will be no better match.
                             */
                            break;
                        }
                    }
                }

                /* Go to the next chunk */
                len -= mmap->size + 4;
                mmap = (struct mb_mmap *)(mmap->size + (IPTR)mmap + 4);
            }

            if (chunk_end)
            {
                /* We have a chunk to add, either reserved or free */

                if (mh && (chunk_start > cur_start))
                {
                    /*
                     * There is a physical gap in the memory. Add the current MemHeader to the
                     * list and reset the pointers in order to begin a new one.
                     */
                    D(nbug("[MMAP] Physical gap 0x%p - 0x%p\n", cur_start, chunk_start));

                    ADDTAIL(memList, mh);
                    mh = NULL;
                    phys_start = ~0;
                }

                if (phys_start == ~0)
                    phys_start = chunk_start;

                if (chunk_type == MMAP_TYPE_RAM)
                {
                    /* Take the reserved space into account */
                    if (reserve > chunk_start)
                        chunk_start = reserve;

                    D(nbug("[MMAP] Usable chunk 0x%p - 0x%p\n", chunk_start, chunk_end));

                    /*
                     * Now let's add the chunk. However, this is the right place to remember about
                     * klo and khi. The area occupied by the kickstart must appear to be preallocated.
                     * This way our chunk can be split into up to three chunks, one of which will be
                     * occupied by the KS.
                     */
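                    /*
                     * Illustrative split, assuming klo/khi fall inside this chunk:
                     *
                     *   chunk_start .. klo        -> added as a free MemChunk
                     *   klo         .. khi        -> kickstart image, left out
                     *   khi         .. chunk_end  -> added as a free MemChunk
                     */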
                    if ((klo >= chunk_end) || (khi <= chunk_start))
                    {
                        /* The kickstart lies outside of this chunk, so just add it as it is */
                        mc = krnAddMemChunk(&mh, mc, chunk_start, chunk_end, phys_start, reg);
                    }
                    else
                    {
                        /* Do we have some usable space below the kickstart? */
                        if (klo > chunk_start)
                            mc = krnAddMemChunk(&mh, mc, chunk_start, klo, phys_start, reg);

                        /* Do we have some usable space above the kickstart? */
                        if (khi < chunk_end)
                            mc = krnAddMemChunk(&mh, mc, khi, chunk_end, phys_start, reg);
                    }
                }
                else if (mh)
                {
                    /* Just expand the physical MemHeader area, but do not add the chunk as free */
                    D(nbug("[MMAP] Reserved chunk 0x%p - 0x%p\n", chunk_start, chunk_end));

                    mh->mh_Upper = (APTR)chunk_end;
                }

                if (chunk_end == reg->end)
                {
                    /* Terminate early if we have reached the end of the region */
                    break;
                }

                cur_start = chunk_end;
            }

        } while (chunk_end);

        /* Add the last MemHeader, if it exists */
        if (mh)
            ADDTAIL(memList, mh);

        reg++;
    }
}
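
#if 0
/*
 * Disabled usage sketch, not part of the original file: roughly how a
 * boot-time caller might feed mmap_InitMemory(). The region table, the flag
 * combinations and the names example_*, kick_start and kick_end are
 * hypothetical placeholders, not the values actually used by the PC port.
 */
static void example_InitMemory(struct mb_mmap *mmap, unsigned long mmap_length,
                               struct MinList *memList, IPTR kick_start, IPTR kick_end)
{
    /* Physical breakout: legacy memory below 1MB, then everything above it */
    static const struct MemRegion example_regions[] =
    {
        {0x000000000, 0x000100000, "Low memory" , -5, MEMF_PUBLIC|MEMF_LOCAL|MEMF_CHIP|MEMF_24BITDMA},
        {0x000100000, ~(IPTR)0   , "High memory",  0, MEMF_PUBLIC|MEMF_LOCAL|MEMF_FAST              },
        {0          , 0          , NULL         ,  0, 0                                             }
    };

    /*
     * The kickstart image [kick_start, kick_end) will appear preallocated,
     * and the first 4KB of RAM are kept out of the free lists ('reserve').
     */
    mmap_InitMemory(mmap, mmap_length, memList, kick_start, kick_end, 0x1000, example_regions);
}
#endif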

struct mb_mmap *mmap_FindRegion(IPTR addr, struct mb_mmap *mmap, unsigned long len)
{
    while (len >= sizeof(struct mb_mmap))
    {
        IPTR end;

#ifdef __i386__
        /* We are on i386, ignore high memory */
        if (mmap->addr_high)
            return NULL;

        if (mmap->len_high)
            end = 0x80000000;
        else
#endif
        end = mmap->addr + mmap->len;

        /* Return the chunk pointer if it matches */
        if ((addr >= mmap->addr) && (addr < end))
            return mmap;

        /* Go to the next chunk */
        len -= mmap->size + 4;
        mmap = (struct mb_mmap *)(mmap->size + (IPTR)mmap + 4);
    }

    return NULL;
}

/* Validate the specified region via the memory map */
BOOL mmap_ValidateRegion(unsigned long addr, unsigned long len, struct mb_mmap *mmap, unsigned long mmap_len)
{
    /* Locate the memory map entry covering the start address */
    struct mb_mmap *region = mmap_FindRegion(addr, mmap, mmap_len);

    /* If it exists, and is free for usage... */
    if (region && region->type == MMAP_TYPE_RAM)
    {
        IPTR end = region->addr + region->len;

        /* Make sure it covers the whole specified area */
        if (addr + len < end)
            return TRUE;
    }

    return FALSE;
}
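
#if 0
/*
 * Disabled usage sketch, not part of the original file: checking whether a
 * boot module can be accessed safely. module_addr and module_size are
 * hypothetical placeholder names.
 */
static BOOL example_CheckModule(unsigned long module_addr, unsigned long module_size,
                                struct mb_mmap *mmap, unsigned long mmap_len)
{
    /* TRUE only if the whole module lies within a single MMAP_TYPE_RAM entry */
    return mmap_ValidateRegion(module_addr, module_size, mmap, mmap_len);
}
#endif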