/*
    Copyright © 2010-2011, The AROS Development Team. All rights reserved.

    Desc: Page-based memory allocator, linear algorithm.
*/
#define __KERNEL_NOLIBBASE__

#include <exec/alerts.h>
#include <exec/execbase.h>
#include <proto/arossupport.h>
#include <proto/exec.h>
#include <proto/kernel.h>

#include <kernel_base.h>
#include <kernel_debug.h>
#include <kernel_mm.h>
#include "mm_linear.h"
/*
 * 'Linear' memory page allocator implementation.
 * The goals of this implementation are simplicity and reduced memory overhead.
 *
 * It's a modified version of the exec.library allocator, which works with variable-length
 * blocks of pages. Instead of lists, it keeps the information about allocated/free pages in
 * a linear memory map, which is separate from the data itself. This makes it possible to
 * block all access to unallocated pages. When allocating blocks at arbitrary addresses, the
 * memory space is searched for the best-matching block. MEMF_REVERSE can be used to specify
 * the search direction.
 */
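
/*
 * Illustrative sketch of the map encoding (assumed values, not taken from the code
 * below): each map entry combines a state with a countdown to the end of its block,
 * so for a 9-page region whose pages 3-5 are allocated the map could look like
 *
 *     page:   0       1       2       3        4        5        6       7       8
 *     entry:  FREE|3  FREE|2  FREE|1  ALLOC|3  ALLOC|2  ALLOC|1  FREE|3  FREE|2  FREE|1
 *
 * A scan positioned on any page can therefore skip towards the end of the current
 * block by simply adding the entry's counter to the page index.
 */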
/*
 * Change the state of a block of 'pages' pages starting at 'first' to 'state'.
 * Checks the blocks to the left and to the right of our block, merges/splits
 * blocks if necessary, and updates the counters.
 */
static void SetBlockState(struct BlockHeader *head, IPTR first, IPTR pages, page_t state)
{
    /* Check the state of the block next to our region */
    IPTR p = first + pages;
    page_t cnt = 1;        /* Initial counter value */
    if (p > head->size)
        /* If we claim we want to access more pages than our BlockHeader has, this is bad */
        Alert(AN_BadFreeAddr);
    else if (p < head->size)
        /*
         * If the block next to our region is in the same state as our
         * block will have, pick up the initial counter value from it. Our block
         * will then be merged with the next one.
         */
        if (P_STATUS(head->map[p]) == state)
            cnt = P_COUNT(head->map[p]);
    /*
     * Set the state of our block. We set the state from the last page to the first,
     * and every page adds 1 to the counter (until it hits the limit value).
     */
        head->map[--p] = cnt | state;
    /*
     * If our block starts at page 0, there's nothing to check before it.
     */
    if (p == 0)
        return;

    /*
     * The preceding block can have either the same state as our new state,
     * or a different one.
     * In both cases its counters need updating.
     * - If it has the same state, we keep the current counter value, so the blocks
     *   get merged.
     * - If it has a different state, we restart the counter from 1. This means
     *   we are splitting the block.
     */
    if (P_STATUS(head->map[p-1]) != state)
    {
        cnt = 1;
        state = P_STATUS(head->map[p-1]);
    }
    /*
     * Update the space counter for the block. We keep going until the state changes
     * again. The block will keep its original state.
     */
        if (P_STATUS(head->map[--p]) != state)
            break;

        head->map[p] = cnt | state;
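
/*
 * Illustrative example of the bookkeeping above (assumed values, not taken from this
 * file): allocating 2 pages starting at page 2 of a 6-page free block turns
 *
 *     FREE|6  FREE|5  FREE|4  FREE|3  FREE|2  FREE|1
 * into
 *     FREE|2  FREE|1  ALLOC|2 ALLOC|1 FREE|2  FREE|1
 *
 * The pages to the right keep their counters, the allocated run gets fresh ones, and
 * the pages to the left are renumbered so that they again count down to the end of
 * the (now smaller) free block, i.e. the original block is split.
 */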
/* Allocate 'size' bytes from MemHeader mh */
APTR mm_Allocate(struct MemHeader *mh, IPTR size, ULONG flags)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    APTR addr = NULL;
    IPTR align = head->pageSize - 1;
    IPTR pages;
    IPTR p;
    IPTR candidate, candidate_size;

    D(bug("[mm_Allocate] Request for %u bytes from BlockHeader %p, KernelBase 0x%p\n", size, head, KernelBase));
    /*
     * If mh_First is NULL, it's a ROM header. We can't allocate from it.
     */
    if (!head)
        return NULL;

    /*
     * If either mc_Next or mc_Bytes is not zero, this MemHeader is not
     * managed by us. We can't allocate from it.
     */
    if (head->mc.mc_Next || head->mc.mc_Bytes)
        return NULL;
    /* Pad up the size and convert it to a number of pages */
    size  = (size + align) & ~align;
    pages = size / head->pageSize;
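
    /*
     * Worked example of the padding above (assumed 4 KB page size, not a value taken
     * from this code): for a request of 10000 bytes, align is 0xFFF, so
     *     size  = (10000 + 0xFFF) & ~0xFFF = 12288
     *     pages = 12288 / 4096             = 3
     * i.e. the request is rounded up to three whole pages.
     */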
    ObtainSemaphore(&head->sem);
    /* Start looking up from page zero */
    p = 0;

    candidate = 0;
    candidate_size = -1;

    /*
     * Look up the best matching free block.
     * We walk through the whole memory map in order to identify free blocks
     * and get their sizes. We use the best-match criterion in order to avoid
     * excessive memory fragmentation.
     */
    do
    {
        IPTR start = p; /* Starting page of the block being examined */
        IPTR free  = 0; /* Count of free pages in the block          */

        while (P_STATUS(head->map[p]) == P_FREE)
        {
            UBYTE cnt = P_COUNT(head->map[p]); /* Get (partial) block length */

            free += cnt;                       /* Add length to total count */
            p    += cnt;                       /* Advance past the block    */

            if (p == head->size)               /* Reached end of this memory chunk? */
                break;
            if (p > head->size)                /* Went past the chunk? This must never happen! */
                Alert(AN_MemCorrupt);
        }

        D(bug("[mm_Allocate] Have %u free pages starting from %u\n", free, start));

        /* Does the block fit? */
        if (free >= pages)
        {
            if (flags & MEMF_REVERSE)
            {
                /*
                 * If MEMF_REVERSE is set, we remember the new candidate if its size is less
                 * than or equal to the current one. This is effectively the same as a best-match
                 * lookup starting from the end of the region.
                 */
                if (free <= candidate_size)
                {
                    D(bug("[mm_Allocate] Old candidate %u (size %d)\n", candidate, candidate_size));
                    candidate = start;
                    candidate_size = free;
                    D(bug("[mm_Allocate] New candidate %u (size %d)\n", candidate, candidate_size));
                }
            }
            else
            {
                /*
                 * If the found block is smaller than the
                 * previous candidate, remember it as the new candidate.
                 */
                if (free < candidate_size)
                {
                    D(bug("[mm_Allocate] Old candidate %u (size %d)\n", candidate, candidate_size));
                    candidate = start;
                    candidate_size = free;
                    D(bug("[mm_Allocate] New candidate %u (size %d)\n", candidate, candidate_size));
                }
            }

            /* If we found an exact match, we can't do better, so stop searching */
            if (free == pages)
            {
                D(bug("[mm_Allocate] Exact match\n"));
                break;
            }
        }

        /*
         * If we are at the end of the memory map, we have nothing
         * more to look at. We either already have a candidate, or we don't.
         */
        if (p == head->size)
        {
            D(bug("[mm_Allocate] Reached end of chunk\n"));
            break;
        }

        D(bug("[mm_Allocate] Allocated block starts at %u\n", p));
        /* Skip past the end of the allocated block */
        while (P_STATUS(head->map[p]) == P_ALLOC)
        {
            p += P_COUNT(head->map[p]);

            if (p == head->size)               /* Reached end of this memory chunk? */
            {
                D(bug("[mm_Allocate] Reached end of chunk\n"));
                break;
            }
            if (p > head->size)                /* Went past the chunk? This must never happen! */
                Alert(AN_MemCorrupt);
        }

        D(bug("[mm_Allocate] Skipped up to page %u\n", p));

    } while (p < head->size);
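
    /*
     * Example of the best-match scan above (assumed numbers): for a 3-page request
     * against free runs of 8, 4 and 4 pages, the forward scan settles on the first
     * 4-page run (4 < 8, and the later run of 4 is not strictly smaller), while
     * MEMF_REVERSE compares with '<=' and therefore ends up with the last 4-page run,
     * which mimics a best-match search from the end of the region. An exact 3-page
     * run would stop the scan immediately in either mode.
     */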
    if (candidate_size != -1)
    {
        /* Mark the block as allocated */
        D(bug("[mm_Allocate] Allocating %u pages starting from %u\n", pages, candidate));
        SetBlockState(head, candidate, pages, P_ALLOC);

        /* Calculate the starting address of the first page */
        addr = head->start + candidate * head->pageSize;
        /* Update the free memory counter */
        mh->mh_Free -= size;
    }

    ReleaseSemaphore(&head->sem);

    D(bug("[mm_Allocate] Allocated at address 0x%p\n", addr));
    return addr;
}
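
/*
 * Illustrative arithmetic for the address calculation above (assumed values): if
 * head->start is 0x00102000, the page size is 4096 and the chosen candidate is page 5,
 * the returned address is 0x00102000 + 5 * 4096 = 0x00107000. mm_Free() and
 * mm_AllocAbs() later recover the page number from an address with the inverse
 * computation, (addr - head->start) / head->pageSize.
 */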
/* Allocate 'size' bytes starting at 'addr' from MemHeader mh */
APTR mm_AllocAbs(struct MemHeader *mh, void *addr, IPTR size)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    IPTR align = head->pageSize - 1;
    IPTR pages;
    IPTR start, p;
    APTR ret = NULL;

    D(bug("[mm_AllocAbs] Request for %u bytes from BlockHeader %p\n", size, head));
    /*
     * If mh_First is NULL, it's a ROM header. We can't allocate from it.
     */
    if (!head)
        return NULL;

    /*
     * If either mc_Next or mc_Bytes is not zero, this MemHeader is not
     * managed by us. We can't allocate from it.
     */
    if (head->mc.mc_Next || head->mc.mc_Bytes)
        return NULL;
    /* Align the starting address */
    addr = (void *)((IPTR)addr & ~align);

    /* The requested address can hit our administrative area. We can't satisfy such a request. */
    if (addr < head->start)
        return NULL;
    /* Pad up the size and convert it to a number of pages */
    size  = (size + align) & ~align;
    pages = size / head->pageSize;

    ObtainSemaphore(&head->sem);
    /* Get the start page number */
    start = (addr - head->start) / head->pageSize;

    /* Check if we have enough free pages starting from the first one */
    p = start;
    while (P_STATUS(head->map[p]) == P_FREE)
    {
        p += P_COUNT(head->map[p]);            /* Advance past the block      */
        if (p >= start + pages)                /* Counted enough free pages?  */
        {
            /* Allocate the block and exit */
            ret = addr;
            SetBlockState(head, start, pages, P_ALLOC);
            break;
        }

        if (p == head->size)                   /* Reached end of this memory chunk? */
            break;
        if (p > head->size)                    /* Went past the chunk? This must never happen! */
            Alert(AN_MemCorrupt);
    }

    ReleaseSemaphore(&head->sem);

    return ret;
}
/* Free 'size' bytes starting at address 'addr' in the MemHeader mh */
void mm_Free(struct MemHeader *mh, APTR addr, IPTR size)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    /* Calculate the number of the starting page within the region */
    IPTR first = (addr - head->start) / head->pageSize;
    IPTR align = head->pageSize - 1;
    IPTR pages;

    /* Pad up the size and convert it to a number of pages */
    size  = (size + align) & ~align;
    pages = size / head->pageSize;

    ObtainSemaphore(&head->sem);

    /* Set the block state to free */
    SetBlockState(head, first, pages, P_FREE);

    /* Update the free memory counter */
    mh->mh_Free += size;

    ReleaseSemaphore(&head->sem);
}
/*
 * Initialize memory management in a given MemHeader.
 * This routine takes into account only mh_Lower and mh_Upper; the rest is
 * set up here.
 * TODO: Currently it's assumed that the MemHeader itself is placed at the beginning
 * of the region. In the future this may not be true.
 */
void mm_Init(struct MemHeader *mh, ULONG pageSize)
{
    struct BlockHeader *head;
    IPTR align;
    APTR end;
    IPTR memsize;
    IPTR mapsize;
    IPTR p;

    /*
     * Currently we assume the struct MemHeader to be at the beginning
     * of the region.
     */
    head  = (APTR)mh + MEMHEADER_TOTAL;
    align = pageSize - 1;

    /* Fill in the BlockHeader */
    head->mc.mc_Next  = NULL;
    head->mc.mc_Bytes = 0;
    head->pageSize    = pageSize;
    /*
     * Page-align the boundaries.
     * We intentionally make 'start' point to the previous page;
     * we'll jump to the next page later, in the loop.
     */
    head->start = (APTR)((IPTR)head->map & ~align);
    end = (APTR)(((IPTR)mh->mh_Upper + 1) & ~align);

    D(bug("[mm_Init] MemHeader 0x%p, BlockHeader 0x%p, usable 0x%p - 0x%p\n", mh, head, head->start, end));
    do
    {
        /* Skip one page. This reserves some space (one page or less) for the allocations map. */
        head->start += pageSize;
        /* Calculate the resulting map size */
        mapsize = (head->start - (APTR)head->map) / sizeof(ULONG);
        /* Calculate the number of free bytes and pages */
        memsize = end - head->start;
        head->size = memsize / pageSize;
        /*
         * Repeat the operation if there's not enough memory for the allocations map.
         * This will take one more page from the area and use it for the map.
         */
    } while (mapsize < head->size);
    D(bug("[mm_Init] Got %u usable pages\n", head->size));
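
    /*
     * Worked example of the sizing loop above (assumed numbers): with a 4 KB page size,
     * head->map page-aligned and 8 MB (2048 pages) between head->map and 'end':
     *
     *     pass 1: reserve 1 page -> mapsize = 4096 / 4 = 1024 entries, head->size = 2047 (too small)
     *     pass 2: reserve 1 more -> mapsize = 8192 / 4 = 2048 entries, head->size = 2046 (enough)
     *
     * so two pages end up holding the allocations map and 2046 pages remain usable.
     */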
    /* Mark all pages as free */
        head->map[--p] = free;
    /* Set the BlockHeader pointer and free space counter */
    mh->mh_First = &head->mc;
    mh->mh_Free  = memsize;
}
/*
 * Apply memory protection to the MemHeader.
 * This is a separate routine because the MMU itself requires some memory to place its
 * control structures, and they need to be allocated using an already working allocator.
 * Only once the MMU is up and running can we apply protection to our memory.
 */
void mm_Protect(struct MemHeader *mh, struct KernelBase *KernelBase)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;

    InitSemaphore(&head->sem);
}
#define SET_LARGEST(ptr, val)       \
    if (ptr)                        \
    {                               \
        if (val > *ptr)             \
            *ptr = val;             \
    }

#define SET_SMALLEST(ptr, val)      \
    if (ptr)                        \
    {                               \
        if (!*ptr || (val < *ptr))  \
            *ptr = val;             \
    }
/* Get statistics from the specified MemHeader */
void mm_StatMemHeader(struct MemHeader *mh, const struct TagItem *query, struct KernelBase *KernelBase)
{
    struct TagItem *tag, *tstate = (struct TagItem *)query;
    IPTR *largest_alloc  = NULL;
    IPTR *smallest_alloc = NULL;
    IPTR *largest_free   = NULL;
    IPTR *smallest_free  = NULL;
    IPTR *num_alloc      = NULL;
    IPTR *num_free       = NULL;
    BOOL do_traverse     = FALSE;
    while ((tag = LibNextTagItem(&tstate)))
    {
        switch (tag->ti_Tag)
        {
        case KMS_Free:
            *((IPTR *)tag->ti_Data) += mh->mh_Free;
            break;

        case KMS_Total:
            *((IPTR *)tag->ti_Data) += mh->mh_Upper - mh->mh_Lower;
            break;
        case KMS_LargestFree:
            largest_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_SmallestFree:
            smallest_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_LargestAlloc:
            largest_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_SmallestAlloc:
            smallest_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;
        case KMS_NumAlloc:
            num_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_NumFree:
            num_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;
        }
    }
    if (do_traverse)
    {
        struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
        IPTR p;

        ObtainSemaphoreShared(&head->sem);

        for (p = 0; p < head->size; )
        {
            /* Get the total size and state of the current block */
            IPTR blksize = 0;
            page_t blkstate = P_STATUS(head->map[p]);

            do
            {
                UBYTE cnt = P_COUNT(head->map[p]); /* Get (partial) block length */

                blksize += cnt;                    /* Add length to total count */
                p       += cnt;                    /* Advance past the block    */

                if (p == head->size)               /* Reached end of this memory chunk? */
                    break;
                if (p > head->size)                /* Went past the chunk? This must never happen! */
                    Alert(AN_MemCorrupt);
            } while (P_STATUS(head->map[p]) == blkstate);

            blksize *= head->pageSize;             /* Convert to bytes */
            if (blkstate == P_ALLOC)
            {
                SET_LARGEST(largest_alloc, blksize);
                SET_SMALLEST(smallest_alloc, blksize);

                if (num_alloc)
                    *num_alloc += 1;
            }
            else
            {
                SET_LARGEST(largest_free, blksize);
                SET_SMALLEST(smallest_free, blksize);

                if (num_free)
                    *num_free += 1;
            }
        }

        ReleaseSemaphore(&head->sem);
    }
}
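
/*
 * Usage sketch (illustrative only; it assumes the KMS_ query tags handled above, a
 * valid MemHeader 'mh' and KernelBase pointer, and is not code from this file):
 *
 *     IPTR largest = 0, smallest = 0;
 *     struct TagItem query[] =
 *     {
 *         { KMS_LargestFree,  (IPTR)&largest  },
 *         { KMS_SmallestFree, (IPTR)&smallest },
 *         { TAG_DONE,         0               }
 *     };
 *
 *     mm_StatMemHeader(mh, query, KernelBase);
 *
 * Afterwards 'largest' and 'smallest' hold the sizes (in bytes) of the largest and
 * smallest free blocks found in mh.
 */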