rom/kernel/mm_linear.c
/*
    Copyright © 2010-2017, The AROS Development Team. All rights reserved.
    $Id$

    Desc: Page-based memory allocator, linear algorithm.
    Lang: english
*/

#ifndef __KERNEL_NOLIBBASE__
#define __KERNEL_NOLIBBASE__
#endif /* !__KERNEL_NOLIBBASE__ */
#include <exec/alerts.h>
#include <exec/execbase.h>
#include <proto/arossupport.h>
#include <proto/exec.h>
#include <proto/kernel.h>

#include <inttypes.h>

#include <kernel_base.h>
#include <kernel_debug.h>
#include <kernel_mm.h>
#include "mm_linear.h"

#define D(x)
/*
 * 'Linear' memory page allocator implementation.
 * The goals of this implementation are simplicity and reduced memory overhead.
 *
 * It is a modified version of the exec.library allocator, which works with variable-length
 * blocks of pages. Instead of lists, it keeps the information about allocated/free pages in
 * a linear memory map, which is separated from the data itself. This makes it possible to
 * block all access to unallocated pages. When allocating blocks at arbitrary addresses, the
 * memory space is searched for the best matching block. MEMF_REVERSE can be used to specify
 * the search direction.
 */
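/*
 * Illustrative sketch of the map encoding (inferred from the code below; the exact
 * P_FREE/P_ALLOC/P_STATUS/P_COUNT definitions live in mm_linear.h and are not shown here):
 * every map entry stores the page state OR'ed with a backwards-running counter, which
 * appears to saturate at 127 (see mm_Init()). For a three-page free block followed by a
 * two-page allocated block the map would read
 *
 *     map[0] = P_FREE  | 3   map[1] = P_FREE  | 2   map[2] = P_FREE  | 1
 *     map[3] = P_ALLOC | 2   map[4] = P_ALLOC | 1
 *
 * so a scanner standing on the first page of a block can skip the whole block
 * (or a 127-page chunk of it) with a single P_COUNT() lookup.
 */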
/*
 * Utility function.
 * Changes the state of a block of 'pages' pages starting at 'first' to 'state'.
 * Checks the blocks to the left and to the right of our block, merges/splits
 * blocks if necessary, and updates counters.
 */
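/*
 * Worked example (hypothetical page numbers): suppose pages 0-9 are allocated and
 * pages 10-12 are free. A call like SetBlockState(head, 5, 5, P_FREE) first picks
 * up the counter 3 from page 10, then writes P_FREE with counts 8,7,6,5,4 into
 * map[5]..map[9], merging pages 5-12 into one 8-page free run. Finally it restarts
 * the counter of the preceding allocated run, rewriting map[4]..map[0] with counts
 * 1..5 so that the allocated run now ends at page 4 (i.e. the old block is split).
 */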
static void SetBlockState(struct BlockHeader *head, IPTR first, IPTR pages, page_t state)
{
    /* Check the state of the block next to our region */
    IPTR p = first + pages;
    page_t cnt = 1;

    if (p > head->size)
        /* If we claim we want to access more pages than our BlockHeader has, this is bad */
        Alert(AN_BadFreeAddr);
    else if (p < head->size)
    {
        /*
         * If the block next to our region is in the same state as our
         * block will have, pick up the initial counter value from it. Our block
         * will be merged with the next one.
         */
        if (P_STATUS(head->map[p]) == state)
        {
            cnt = P_COUNT(head->map[p]);
            INC_COUNT(cnt);
        }
    }

    /*
     * Set the state of our block. We set the state from the last to the first page, and
     * every page adds 1 to the counter (until it hits the limit value).
     */
    do
    {
        head->map[--p] = cnt | state;
        INC_COUNT(cnt);
    } while (p > first);

    /*
     * If our block starts at page 0, there's nothing to check before it.
     * We're done.
     */
    if (p == 0)
        return;

    /*
     * The preceding block can have either the same state as our new state,
     * or a different one.
     * In both cases its counters need updating.
     * - If it has the same state, we keep the current counter value, so the blocks
     *   get merged.
     * - If it has a different state, we restart the counter from 1. This means
     *   we are splitting the block.
     */
    if (P_STATUS(head->map[p-1]) != state)
    {
        cnt = 1;
        state = P_STATUS(head->map[p-1]);
    }

    /*
     * Update the counters of the preceding block. We keep going until the state changes
     * again. The block keeps its original state.
     */
    do
    {
        if (P_STATUS(head->map[--p]) != state)
            break;

        head->map[p] = cnt | state;
        INC_COUNT(cnt);
    } while (p > 0);
}
/* Allocate 'size' bytes from MemHeader mh */
APTR mm_Allocate(struct MemHeader *mh, IPTR size, ULONG flags)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    APTR addr = NULL;
    IPTR align = head->pageSize - 1;
    IPTR pages;
    IPTR p;
    IPTR candidate, candidate_size;

    D(bug("[mm_Allocate] Request for %u bytes from BlockHeader %p, KernelBase 0x%p\n", size, head, KernelBase));

    /*
     * Safety checks.
     * If mh_First is NULL, it's a ROM header. We can't allocate from it.
     */
    if (!head)
        return NULL;

    /*
     * If either mc_Next or mc_Bytes is not zero, this MemHeader is not
     * managed by us. We can't allocate from it.
     */
    if (head->mc.mc_Next || head->mc.mc_Bytes)
        return NULL;

    /* Pad up the size and convert it to a number of pages */
    size = (size + align) & ~align;
    pages = size / head->pageSize;
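    /*
     * The two lines above round the request up to whole pages: e.g. assuming a
     * 4096-byte page size (purely illustrative), a 5000-byte request becomes
     * size = 8192 and pages = 2.
     */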
    if (KernelBase)
        ObtainSemaphore(&head->sem);

    /* Start looking up from page zero */
    p = 0;
    candidate = 0;
    candidate_size = -1;

    /*
     * Look up the best matching free block.
     * We walk through the whole memory map in order to identify free blocks
     * and get their sizes. We use the best-match criteria in order to avoid
     * excessive memory fragmentation.
     */
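    /*
     * Illustration (hypothetical sizes): for a 2-page request with free runs of
     * 5, 2 and 3 pages, the 2-page run is an exact match and wins. With
     * MEMF_REVERSE the '<=' comparison below prefers the matching run that lies
     * closest to the end of the region when several runs are equally small.
     */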
    do
    {
        IPTR start = p;  /* Starting page of the block being examined */
        IPTR free  = 0;  /* Count of free pages in the block */

        while (P_STATUS(head->map[p]) == P_FREE)
        {
            UBYTE cnt = P_COUNT(head->map[p]); /* Get (partial) block length */

            free += cnt;                       /* Add length to total count */
            p += cnt;                          /* Advance past the block */

            if (p == head->size)               /* Reached end of this memory chunk? */
                break;
            if (p > head->size)                /* Went past the chunk? This must never happen! */
                Alert(AN_MemCorrupt);
        }

        D(bug("[mm_Allocate] Have %u free pages starting from %u\n", free, start));

        /* Does the block fit? */
        if (free >= pages)
        {
            if (flags & MEMF_REVERSE)
            {
                /*
                 * If MEMF_REVERSE is set, we remember the new candidate if its size is less
                 * than or equal to the current one. This is effectively the same as a
                 * best-match lookup starting from the end of the region.
                 */
                if (free <= candidate_size)
                {
                    D(bug("[mm_Allocate] Old candidate %u (size %d)\n", candidate, candidate_size));
                    candidate = start;
                    candidate_size = free;
                    D(bug("[mm_Allocate] New candidate %u (size %d)\n", candidate, candidate_size));
                }
            }
            else
            {
                /*
                 * If the found block is smaller than the
                 * previous candidate, remember it as the new candidate.
                 */
                if (free < candidate_size)
                {
                    D(bug("[mm_Allocate] Old candidate %u (size %d)\n", candidate, candidate_size));
                    candidate = start;
                    candidate_size = free;
                    D(bug("[mm_Allocate] New candidate %u (size %d)\n", candidate, candidate_size));
                }
            }

            /* If we found an exact match, we can't do better, so stop searching */
            if (free == pages)
            {
                D(bug("[mm_Allocate] Exact match\n"));
                break;
            }
        }

        /*
         * If we are at the end of the memory map, we have nothing
         * more to look at. We either already have a candidate,
         * or we don't.
         */
        if (p == head->size)
        {
            D(bug("[mm_Allocate] Reached end of chunk\n"));
            break;
        }

        D(bug("[mm_Allocate] Allocated block starts at %u\n", p));
        /* Skip past the end of the allocated block */
        while (P_STATUS(head->map[p]) == P_ALLOC)
        {
            p += P_COUNT(head->map[p]);

            if (p == head->size)
            {
                D(bug("[mm_Allocate] Reached end of chunk\n"));
                break;
            }
            if (p > head->size)
                Alert(AN_MemCorrupt);
        }

        D(bug("[mm_Allocate] Skipped up to page %u\n", p));

    } while (p < head->size);
    /* Found a block? */
    if (candidate_size != -1)
    {
        /* Mark the block as allocated */
        D(bug("[mm_Allocate] Allocating %u pages starting from %u\n", pages, candidate));
        SetBlockState(head, candidate, pages, P_ALLOC);

        /* Calculate the starting address of the first page */
        addr = head->start + candidate * head->pageSize;
        /* Update the free memory counter */
        mh->mh_Free -= size;
    }

    if (KernelBase)
        ReleaseSemaphore(&head->sem);

    D(bug("[mm_Allocate] Allocated at address 0x%p\n", addr));
    return addr;
}
/* Allocate 'size' bytes starting at address 'addr' from MemHeader mh */
APTR mm_AllocAbs(struct MemHeader *mh, void *addr, IPTR size)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    IPTR align = head->pageSize - 1;
    IPTR pages;
    IPTR start, p;
    void *ret = NULL;

    D(bug("[mm_AllocAbs] Request for %u bytes from BlockHeader %p\n", size, head));

    /*
     * Safety checks.
     * If mh_First is NULL, it's a ROM header. We can't allocate from it.
     */
    if (!head)
        return NULL;

    /*
     * If either mc_Next or mc_Bytes is not zero, this MemHeader is not
     * managed by us. We can't allocate from it.
     */
    if (head->mc.mc_Next || head->mc.mc_Bytes)
        return NULL;

    /* Align the starting address */
    addr = (void *)((IPTR)addr & ~align);

    /* The requested address can hit our administrative area. We can't satisfy such a request. */
    if (addr < head->start)
        return NULL;

    /* Pad up the size and convert it to a number of pages */
    size = (size + align) & ~align;
    pages = size / head->pageSize;

    ObtainSemaphore(&head->sem);

    /* Get the starting page number */
    start = (addr - head->start) / head->pageSize;
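    /*
     * For example (illustrative numbers): with a 4096-byte page size and
     * head->start = 0x400000, a request at addr = 0x40A000 yields start page 10.
     */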
    /* Check if we have enough free pages starting from the first one */
    p = start;
    while (P_STATUS(head->map[p]) == P_FREE)
    {
        p += P_COUNT(head->map[p]);     /* Advance past the block */
        if (p >= start + pages)         /* Counted enough free pages? */
        {
            /* Allocate the block and exit */
            ret = addr;
            SetBlockState(head, start, pages, P_ALLOC);
            break;
        }

        if (p == head->size)            /* Reached end of this memory chunk? */
            break;
        if (p > head->size)             /* Went past the chunk? This must never happen! */
            Alert(AN_MemCorrupt);
    }

    ReleaseSemaphore(&head->sem);
    return ret;
}
/* Free 'size' bytes starting from address 'addr' in the MemHeader mh */
void mm_Free(struct MemHeader *mh, APTR addr, IPTR size)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    /* Calculate the number of the starting page within the region */
    IPTR first = (addr - head->start) / head->pageSize;
    IPTR align = head->pageSize - 1;
    IPTR pages;

    /* Pad up the size and convert it to a number of pages */
    size = (size + align) & ~align;
    pages = size / head->pageSize;

    ObtainSemaphore(&head->sem);

    /* Set the block state to free */
    SetBlockState(head, first, pages, P_FREE);

    /* Update the free memory counter */
    mh->mh_Free += size;

    ReleaseSemaphore(&head->sem);
}
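/*
 * Usage sketch (illustrative, not taken from any caller): a block obtained with
 *     APTR buf = mm_Allocate(mh, 10000, 0);
 * is released by passing the same byte size back,
 *     mm_Free(mh, buf, 10000);
 * Both calls pad the size up to the same whole number of pages, so the page
 * counters stay consistent.
 */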
/*
 * Initialize memory management in a given MemHeader.
 * This routine takes into account only mh_Lower and mh_Upper; the rest is
 * ignored.
 * TODO: Currently it's assumed that the MemHeader itself is placed at the beginning
 * of the region. In the future this may not be true.
 */
void mm_Init(struct MemHeader *mh, ULONG pageSize)
{
    struct BlockHeader *head;
    IPTR align;
    APTR end;
    IPTR memsize;
    IPTR mapsize;
    IPTR p;
    UBYTE free;

    /*
     * Currently we assume the struct MemHeader to be at the beginning
     * of our region.
     */
    head = (APTR)mh + MEMHEADER_TOTAL;
    align = pageSize - 1;

    /* Fill in the BlockHeader */
    head->mc.mc_Next  = NULL;
    head->mc.mc_Bytes = 0;
    head->pageSize    = pageSize;

    /*
     * Page-align the boundaries.
     * We intentionally make 'start' point to the previous page;
     * we'll jump to the next page later, in the loop.
     */
    head->start = (APTR)((IPTR)head->map & ~align);
    end = (APTR)(((IPTR)mh->mh_Upper + 1) & ~align);

    D(bug("[mm_Init] MemHeader 0x%p, BlockHeader 0x%p, usable 0x%p - 0x%p\n", mh, head, head->start, end));
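    /*
     * Illustration of the sizing loop below (assuming 4K pages and 4-byte map
     * entries, which this code does not mandate): each reserved page provides
     * room for about 1024 map entries while removing one usable page, so a 16 MB
     * region settles after roughly four iterations, leaving about 4092 usable pages.
     */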
    do
    {
        /* Skip one page. This reserves some space (one page or less) for the allocations map. */
        head->start += pageSize;
        /* Calculate the resulting map size */
        mapsize = (head->start - (APTR)head->map) / sizeof(ULONG);
        /* Calculate the number of free bytes and pages */
        memsize = end - head->start;
        head->size = memsize / pageSize;
        /*
         * Repeat the operation if there's not enough memory for the allocations map.
         * This will take one more page from the area and use it for the map.
         */
    } while (mapsize < head->size);

    D(bug("[mm_Init] Got %u usable pages\n", head->size));

    /* Mark all pages as free */
    p = head->size;
    free = 1;
    do {
        head->map[--p] = free;
        if (free < 127)
            free++;
    } while (p > 0);

    /* Set the BlockHeader pointer and the free space counter */
    mh->mh_First = &head->mc;
    mh->mh_Free  = memsize;
}
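/*
 * Resulting layout (illustrative):
 *
 *   mh_Lower:    struct MemHeader
 *                struct BlockHeader, whose map[] array grows up to the first
 *                page boundary that leaves one map entry per managed page
 *   head->start: first managed (allocatable) page
 *   ...
 *   end:         page-aligned mh_Upper + 1
 */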
/*
 * Apply memory protection to the MemHeader.
 * This is a separate routine because the MMU itself requires some memory to place its
 * control structures, and those need to be allocated using an already working allocator.
 * Only once the MMU is up and running can we apply protection to our memory.
 */
void mm_Protect(struct MemHeader *mh, struct KernelBase *KernelBase)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;

    InitSemaphore(&head->sem);

    /* TODO */
}
#define SET_LARGEST(ptr, val)  \
    if (ptr)                   \
    {                          \
        if (val > *ptr)        \
            *ptr = val;        \
    }

#define SET_SMALLEST(ptr, val) \
    if (ptr)                   \
    {                          \
        if (*ptr)              \
        {                      \
            if (val < *ptr)    \
                *ptr = val;    \
        }                      \
        else                   \
            *ptr = val;        \
    }
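/*
 * Note that SET_SMALLEST treats a current value of 0 as "not set yet", so the
 * caller of the statistics code below is presumably expected to pre-initialize
 * its result variables to 0.
 */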
/* Get statistics from the specified MemHeader */
void mm_StatMemHeader(struct MemHeader *mh, const struct TagItem *query, struct KernelBase *KernelBase)
{
    struct TagItem *tag, *tstate = (struct TagItem *)query;
    IPTR *largest_alloc  = NULL;
    IPTR *smallest_alloc = NULL;
    IPTR *largest_free   = NULL;
    IPTR *smallest_free  = NULL;
    IPTR *num_alloc      = NULL;
    IPTR *num_free       = NULL;
    BOOL do_traverse = FALSE;

    while ((tag = LibNextTagItem(&tstate)))
    {
        switch (tag->ti_Tag)
        {
        case KMS_Free:
            *((IPTR *)tag->ti_Data) += mh->mh_Free;
            break;

        case KMS_Total:
            *((IPTR *)tag->ti_Data) += mh->mh_Upper - mh->mh_Lower;
            break;

        case KMS_LargestFree:
            largest_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_SmallestFree:
            smallest_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_LargestAlloc:
            largest_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_SmallestAlloc:
            smallest_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_NumAlloc:
            num_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_NumFree:
            num_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;
        }
    }
    if (do_traverse)
    {
        struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
        IPTR p;

        ObtainSemaphoreShared(&head->sem);

        for (p = 0; p < head->size; )
        {
            /* Get the total size and state of the current block */
            IPTR blksize = 0;
            page_t blkstate = P_STATUS(head->map[p]);

            do
            {
                UBYTE cnt = P_COUNT(head->map[p]); /* Get (partial) block length */

                blksize += cnt;                    /* Add length to total count */
                p += cnt;                          /* Advance past the block */

                if (p == head->size)               /* Reached end of this memory chunk? */
                    break;
                if (p > head->size)                /* Went past the chunk? This must never happen! */
                    Alert(AN_MemCorrupt);
            } while (P_STATUS(head->map[p]) == blkstate);

            blksize *= head->pageSize;             /* Convert to bytes */

            if (blkstate == P_ALLOC)
            {
                SET_LARGEST(largest_alloc, blksize);
                SET_SMALLEST(smallest_alloc, blksize);

                if (num_alloc)
                    *num_alloc += 1;
            }
            else
            {
                SET_LARGEST(largest_free, blksize);
                SET_SMALLEST(smallest_free, blksize);

                if (num_free)
                    *num_free += 1;
            }
        }

        ReleaseSemaphore(&head->sem);
    }
}