helenos.git: kernel/generic/src/mm/frame.c (blob 7f54f50704c899c74d247e5862bdfdafcc36a1c0)
1 /*
2 * Copyright (c) 2001-2005 Jakub Jermar
3 * Copyright (c) 2005 Sergey Bondari
4 * Copyright (c) 2009 Martin Decky
5 * All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * - The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 /** @addtogroup kernel_generic_mm
32 * @{
35 /**
36 * @file
37 * @brief Physical frame allocator.
39 * This file contains the physical frame allocator and memory zone management.
40 * The frame allocator is built on top of the two-level bitmap structure.
44 #include <typedefs.h>
45 #include <mm/frame.h>
46 #include <mm/reserve.h>
47 #include <mm/as.h>
48 #include <panic.h>
49 #include <assert.h>
50 #include <adt/list.h>
51 #include <synch/mutex.h>
52 #include <synch/condvar.h>
53 #include <arch/asm.h>
54 #include <arch.h>
55 #include <stdio.h>
56 #include <log.h>
57 #include <align.h>
58 #include <mm/slab.h>
59 #include <bitops.h>
60 #include <macros.h>
61 #include <config.h>
62 #include <str.h>
63 #include <proc/thread.h> /* THREAD */
65 zones_t zones;
68 * Synchronization primitives used to sleep when there is no memory
69 * available.
71 static mutex_t mem_avail_mtx;
72 static condvar_t mem_avail_cv;
73 static size_t mem_avail_req = 0; /**< Number of frames requested. */
74 static size_t mem_avail_gen = 0; /**< Generation counter. */
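/*
 * In outline, the wait/wakeup protocol implemented by frame_alloc_generic()
 * and frame_free_generic() below is:
 *
 *     // allocating side, when no suitable zone was found and FRAME_ATOMIC
 *     // is not set (interrupts disabled around the active mutex):
 *     mutex_lock(&mem_avail_mtx);
 *     mem_avail_req = mem_avail_req ? min(mem_avail_req, count) : count;
 *     size_t gen = mem_avail_gen;
 *     while (gen == mem_avail_gen)
 *             condvar_wait(&mem_avail_cv, &mem_avail_mtx);
 *     mutex_unlock(&mem_avail_mtx);
 *
 *     // freeing side, after returning frames to their zones:
 *     mutex_lock(&mem_avail_mtx);
 *     mem_avail_req -= min(mem_avail_req, freed);
 *     if (mem_avail_req == 0) {
 *             mem_avail_gen++;
 *             condvar_broadcast(&mem_avail_cv);
 *     }
 *     mutex_unlock(&mem_avail_mtx);
 */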
76 /** Initialize frame structure.
78 * @param frame Frame structure to be initialized.
81 _NO_TRACE static void frame_initialize(frame_t *frame)
83 frame->refcount = 0;
84 frame->parent = NULL;
88 * Zones functions
91 /** Insert-sort zone into zones list.
93 * Assume interrupts are disabled and zones lock is
94 * locked.
96 * @param base Base frame of the newly inserted zone.
97 * @param count Number of frames of the newly inserted zone.
99 * @return Zone number on success, -1 on error.
102 _NO_TRACE static size_t zones_insert_zone(pfn_t base, size_t count,
103 zone_flags_t flags)
105 if (zones.count + 1 == ZONES_MAX) {
106 log(LF_OTHER, LVL_ERROR, "Maximum zone count %u exceeded!",
107 ZONES_MAX);
108 return (size_t) -1;
111 size_t i;
112 for (i = 0; i < zones.count; i++) {
113 /* Check for overlap */
114 if (overlaps(zones.info[i].base, zones.info[i].count,
115 base, count)) {
118 * If the overlapping zones are of the same type
119 * and the new zone is completely within the previous
120 * one, then quietly ignore the new zone.
124 if ((zones.info[i].flags != flags) ||
125 (!iswithin(zones.info[i].base, zones.info[i].count,
126 base, count))) {
127 log(LF_OTHER, LVL_WARN,
128 "Zone (%p, %p) overlaps "
129 "with previous zone (%p %p)!",
130 (void *) PFN2ADDR(base), (void *) PFN2ADDR(count),
131 (void *) PFN2ADDR(zones.info[i].base),
132 (void *) PFN2ADDR(zones.info[i].count));
135 return (size_t) -1;
137 if (base < zones.info[i].base)
138 break;
141 /* Move other zones up */
142 for (size_t j = zones.count; j > i; j--)
143 zones.info[j] = zones.info[j - 1];
145 zones.count++;
147 return i;
150 /** Get total available frames.
152 * Assume interrupts are disabled and zones lock is
153 * locked.
155 * @return Total number of available frames.
158 _NO_TRACE static size_t frame_total_free_get_internal(void)
160 size_t total = 0;
161 size_t i;
163 for (i = 0; i < zones.count; i++)
164 total += zones.info[i].free_count;
166 return total;
169 _NO_TRACE size_t frame_total_free_get(void)
171 size_t total;
173 irq_spinlock_lock(&zones.lock, true);
174 total = frame_total_free_get_internal();
175 irq_spinlock_unlock(&zones.lock, true);
177 return total;
180 /** Find the zone containing a given range of frames.
182 * Assume interrupts are disabled and zones lock is
183 * locked.
185 * @param frame Frame number contained in zone.
186 * @param count Number of frames to look for.
187 * @param hint Zone index at which to start the search.
189 * @return Zone index or -1 if not found.
192 _NO_TRACE size_t find_zone(pfn_t frame, size_t count, size_t hint)
194 if (hint >= zones.count)
195 hint = 0;
197 size_t i = hint;
198 do {
199 if ((zones.info[i].base <= frame) &&
200 (zones.info[i].base + zones.info[i].count >= frame + count))
201 return i;
203 i++;
204 if (i >= zones.count)
205 i = 0;
207 } while (i != hint);
209 return (size_t) -1;
212 /** @return True if the zone can allocate the specified number of frames */
213 _NO_TRACE static bool zone_can_alloc(zone_t *zone, size_t count,
214 pfn_t constraint)
217 * The function bitmap_allocate_range() does not modify
218 * the bitmap if the last argument is NULL.
221 return ((zone->flags & ZONE_AVAILABLE) &&
222 bitmap_allocate_range(&zone->bitmap, count, zone->base,
223 FRAME_LOWPRIO, constraint, NULL));
226 /** Find a zone that can allocate the specified number of frames
228 * This function searches among all zones. Assume interrupts are
229 * disabled and zones lock is locked.
231 * @param count Number of free frames we are trying to find.
232 * @param flags Required flags of the zone.
233 * @param constraint Indication of bits that cannot be set in the
234 * physical frame number of the first allocated frame.
235 * @param hint Preferred zone.
237 * @return Zone that can allocate specified number of frames.
238 * @return -1 if no zone can satisfy the request.
241 _NO_TRACE static size_t find_free_zone_all(size_t count, zone_flags_t flags,
242 pfn_t constraint, size_t hint)
244 for (size_t pos = 0; pos < zones.count; pos++) {
245 size_t i = (pos + hint) % zones.count;
247 /* Check whether the zone meets the search criteria. */
248 if (!ZONE_FLAGS_MATCH(zones.info[i].flags, flags))
249 continue;
251 /* Check if the zone can satisfy the allocation request. */
252 if (zone_can_alloc(&zones.info[i], count, constraint))
253 return i;
256 return (size_t) -1;
259 /** Check whether a frame range is high-priority memory.
261 * @param base Starting frame.
262 * @param count Number of frames.
264 * @return True if the range contains only priority memory.
267 _NO_TRACE static bool is_high_priority(pfn_t base, size_t count)
269 return (base + count <= FRAME_LOWPRIO);
272 /** Find a zone that can allocate the specified number of frames
274 * This function ignores zones that contain only high-priority
275 * memory. Assume interrupts are disabled and zones lock is locked.
277 * @param count Number of free frames we are trying to find.
278 * @param flags Required flags of the zone.
279 * @param constraint Indication of bits that cannot be set in the
280 * physical frame number of the first allocated frame.
281 * @param hint Preferred zone.
283 * @return Zone that can allocate specified number of frames.
284 * @return -1 if no low-priority zone can satisfy the request.
287 _NO_TRACE static size_t find_free_zone_lowprio(size_t count, zone_flags_t flags,
288 pfn_t constraint, size_t hint)
290 for (size_t pos = 0; pos < zones.count; pos++) {
291 size_t i = (pos + hint) % zones.count;
293 /* Skip zones containing only high-priority memory. */
294 if (is_high_priority(zones.info[i].base, zones.info[i].count))
295 continue;
297 /* Check whether the zone meets the search criteria. */
298 if (!ZONE_FLAGS_MATCH(zones.info[i].flags, flags))
299 continue;
301 /* Check if the zone can satisfy the allocation request. */
302 if (zone_can_alloc(&zones.info[i], count, constraint))
303 return i;
306 return (size_t) -1;
309 /** Find a zone that can allocate the specified number of frames
311 * Assume interrupts are disabled and zones lock is
312 * locked.
314 * @param count Number of free frames we are trying to find.
315 * @param flags Required flags of the target zone.
316 * @param constraint Indication of bits that cannot be set in the
317 * physical frame number of the first allocated frame.
318 * @param hint Preferred zone.
320 * @return Zone that can allocate specified number of frames.
321 * @return -1 if no zone can satisfy the request.
324 _NO_TRACE static size_t find_free_zone(size_t count, zone_flags_t flags,
325 pfn_t constraint, size_t hint)
327 if (hint >= zones.count)
328 hint = 0;
331 * Prefer zones with low-priority memory over
332 * zones with high-priority memory.
335 size_t znum = find_free_zone_lowprio(count, flags, constraint, hint);
336 if (znum != (size_t) -1)
337 return znum;
339 /* Take all zones into account */
340 return find_free_zone_all(count, flags, constraint, hint);
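/*
 * The resulting search order is therefore: first zones that contain at least
 * some memory at or above FRAME_LOWPRIO, and only if none of them can satisfy
 * the request, all zones, which adds the ones consisting purely of
 * high-priority (low-PFN) memory. Callers see this only through the single
 * entry point, e.g.:
 *
 *     size_t znum = find_free_zone(count, ZONE_LOWMEM | ZONE_AVAILABLE,
 *         constraint, hint);
 *     if (znum == (size_t) -1) {
 *             // no zone with matching flags can hold the allocation
 *     }
 */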
344 * Zone functions
347 /** Return frame from zone. */
348 _NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t index)
350 assert(index < zone->count);
352 return &zone->frames[index];
355 /** Allocate frame in particular zone.
357 * Assume zone is locked and is available for allocation.
358 * Panics if allocation is impossible.
360 * @param zone Zone to allocate from.
361 * @param count Number of frames to allocate
362 * @param constraint Indication of bits that cannot be set in the
363 * physical frame number of the first allocated frame.
365 * @return Frame index in zone.
368 _NO_TRACE static size_t zone_frame_alloc(zone_t *zone, size_t count,
369 pfn_t constraint)
371 assert(zone->flags & ZONE_AVAILABLE);
372 assert(zone->free_count >= count);
374 /* Allocate frames from zone */
375 size_t index = (size_t) -1;
376 int avail = bitmap_allocate_range(&zone->bitmap, count, zone->base,
377 FRAME_LOWPRIO, constraint, &index);
379 (void) avail;
380 assert(avail);
381 assert(index != (size_t) -1);
383 /* Update frame reference count */
384 for (size_t i = 0; i < count; i++) {
385 frame_t *frame = zone_get_frame(zone, index + i);
387 assert(frame->refcount == 0);
388 frame->refcount = 1;
391 /* Update zone information. */
392 zone->free_count -= count;
393 zone->busy_count += count;
395 return index;
398 /** Free frame from zone.
400 * Assume zone is locked and is available for deallocation.
402 * @param zone Pointer to zone from which the frame is to be freed.
403 * @param index Frame index relative to zone.
405 * @return Number of freed frames.
408 _NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t index)
410 assert(zone->flags & ZONE_AVAILABLE);
412 frame_t *frame = zone_get_frame(zone, index);
413 assert(frame->refcount > 0);
415 if (!--frame->refcount) {
416 assert(zone->busy_count > 0);
418 bitmap_set(&zone->bitmap, index, 0);
420 /* Update zone information. */
421 zone->free_count++;
422 zone->busy_count--;
424 return 1;
427 return 0;
430 /** Mark frame in zone unavailable to allocation. */
431 _NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t index)
433 assert(zone->flags & ZONE_AVAILABLE);
435 frame_t *frame = zone_get_frame(zone, index);
436 assert(frame->refcount <= 1);
438 if (frame->refcount > 0)
439 return;
441 assert(zone->free_count > 0);
443 frame->refcount = 1;
444 bitmap_set_range(&zone->bitmap, index, 1);
446 zone->free_count--;
447 reserve_force_alloc(1);
450 /** Mark frame in zone available to allocation. */
451 _NO_TRACE static void zone_mark_available(zone_t *zone, size_t index)
453 assert(zone->flags & ZONE_AVAILABLE);
455 frame_t *frame = zone_get_frame(zone, index);
456 assert(frame->refcount == 1);
458 frame->refcount = 0;
459 bitmap_set_range(&zone->bitmap, index, 0);
461 zone->free_count++;
464 /** Merge two zones.
466 * Assume z1 & z2 are locked and compatible and zones lock is
467 * locked.
469 * @param z1 First zone to merge.
470 * @param z2 Second zone to merge.
471 * @param old_z1 Original data of the first zone.
472 * @param confdata Merged zone configuration data.
475 _NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1,
476 void *confdata)
478 assert(zones.info[z1].flags & ZONE_AVAILABLE);
479 assert(zones.info[z2].flags & ZONE_AVAILABLE);
480 assert(zones.info[z1].flags == zones.info[z2].flags);
481 assert(zones.info[z1].base < zones.info[z2].base);
482 assert(!overlaps(zones.info[z1].base, zones.info[z1].count,
483 zones.info[z2].base, zones.info[z2].count));
485 /* Difference between zone bases */
486 pfn_t base_diff = zones.info[z2].base - zones.info[z1].base;
487 pfn_t gap = base_diff - zones.info[z1].count;
489 zones.info[z1].count = base_diff + zones.info[z2].count;
490 zones.info[z1].free_count += zones.info[z2].free_count;
491 zones.info[z1].busy_count += zones.info[z2].busy_count;
493 bitmap_initialize(&zones.info[z1].bitmap, zones.info[z1].count,
494 confdata + (sizeof(frame_t) * zones.info[z1].count));
495 bitmap_clear_range(&zones.info[z1].bitmap, 0, zones.info[z1].count);
497 zones.info[z1].frames = (frame_t *) confdata;
500 * Copy frames and bits from both zones to preserve parents, etc.
503 for (size_t i = 0; i < old_z1->count; i++) {
504 bitmap_set(&zones.info[z1].bitmap, i,
505 bitmap_get(&old_z1->bitmap, i));
506 zones.info[z1].frames[i] = old_z1->frames[i];
509 for (size_t i = 0; i < zones.info[z2].count; i++) {
510 bitmap_set(&zones.info[z1].bitmap, base_diff + i,
511 bitmap_get(&zones.info[z2].bitmap, i));
512 zones.info[z1].frames[base_diff + i] =
513 zones.info[z2].frames[i];
517 * Mark the gap between the original zones as unavailable.
520 for (size_t i = 0; i < gap; i++) {
521 frame_initialize(&zones.info[z1].frames[old_z1->count + i]);
522 zone_mark_unavailable(&zones.info[z1], old_z1->count + i);
526 /** Return old configuration frames into the zone.
528 * We have two cases:
529 * - The configuration data is outside the zone
530 * -> do nothing (perhaps call frame_free?)
531 * - The configuration data was created by zone_create
532 * or updated by reduce_region -> free every frame
534 * @param znum The actual zone where freeing should occur.
535 * @param pfn Old zone configuration frame.
536 * @param count Old zone frame count.
539 _NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count)
541 assert(zones.info[znum].flags & ZONE_AVAILABLE);
543 size_t cframes = SIZE2FRAMES(zone_conf_size(count));
545 if ((pfn < zones.info[znum].base) ||
546 (pfn >= zones.info[znum].base + zones.info[znum].count))
547 return;
549 for (size_t i = 0; i < cframes; i++)
550 zone_mark_available(&zones.info[znum],
551 pfn - zones.info[znum].base + i);
554 /** Merge zones z1 and z2.
556 * The merged zones must be 2 zones with no zone existing in between
557 * (which means that z2 = z1 + 1). Both zones must be available zones
558 * with the same flags.
560 * When you create a new zone, the frame allocator configuration does
561 * not need to be 2^order in size. Once the allocator is running it is no longer
562 * possible, as the merged configuration data occupies more space :-/
565 bool zone_merge(size_t z1, size_t z2)
567 irq_spinlock_lock(&zones.lock, true);
569 bool ret = true;
572 * We can join only 2 zones with none existing in between;
573 * the zones have to be available and have the same
574 * set of flags.
576 if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) ||
577 (zones.info[z1].flags != zones.info[z2].flags)) {
578 ret = false;
579 goto errout;
582 pfn_t cframes = SIZE2FRAMES(zone_conf_size(
583 zones.info[z2].base - zones.info[z1].base +
584 zones.info[z2].count));
586 /* Allocate merged zone data inside one of the zones */
587 pfn_t pfn;
588 if (zone_can_alloc(&zones.info[z1], cframes, 0)) {
589 pfn = zones.info[z1].base +
590 zone_frame_alloc(&zones.info[z1], cframes, 0);
591 } else if (zone_can_alloc(&zones.info[z2], cframes, 0)) {
592 pfn = zones.info[z2].base +
593 zone_frame_alloc(&zones.info[z2], cframes, 0);
594 } else {
595 ret = false;
596 goto errout;
599 /* Preserve original data from z1 */
600 zone_t old_z1 = zones.info[z1];
602 /* Do zone merging */
603 zone_merge_internal(z1, z2, &old_z1, (void *) PA2KA(PFN2ADDR(pfn)));
605 /* Subtract zone information from busy frames */
606 zones.info[z1].busy_count -= cframes;
608 /* Free old zone information */
609 return_config_frames(z1,
610 ADDR2PFN(KA2PA((uintptr_t) old_z1.frames)), old_z1.count);
611 return_config_frames(z1,
612 ADDR2PFN(KA2PA((uintptr_t) zones.info[z2].frames)),
613 zones.info[z2].count);
615 /* Move zones down */
616 for (size_t i = z2 + 1; i < zones.count; i++)
617 zones.info[i - 1] = zones.info[i];
619 zones.count--;
621 errout:
622 irq_spinlock_unlock(&zones.lock, true);
624 return ret;
627 /** Merge all mergeable zones into one big zone.
629 * It is reasonable to do this on systems where the
630 * BIOS reports memory in several chunks, so that we
631 * end up with a single zone (which is faster).
634 void zone_merge_all(void)
636 size_t i = 1;
638 while (i < zones.count) {
639 if (!zone_merge(i - 1, i))
640 i++;
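/*
 * Note on the loop above: when zone_merge() succeeds, the zones following the
 * merged pair are shifted down by one index, so the same pair of indices is
 * simply retried; only a failed merge advances the index.
 */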
644 /** Create new frame zone.
646 * @param zone Zone to construct.
647 * @param start Physical address of the first frame within the zone.
648 * @param count Count of frames in zone.
649 * @param flags Zone flags.
650 * @param confdata Configuration data of the zone.
652 * The zone structure is initialized in place.
655 _NO_TRACE static void zone_construct(zone_t *zone, pfn_t start, size_t count,
656 zone_flags_t flags, void *confdata)
658 zone->base = start;
659 zone->count = count;
660 zone->flags = flags;
661 zone->free_count = count;
662 zone->busy_count = 0;
664 if (flags & ZONE_AVAILABLE) {
666 * Initialize frame bitmap (located after the array of
667 * frame_t structures in the configuration space).
670 bitmap_initialize(&zone->bitmap, count, confdata +
671 (sizeof(frame_t) * count));
672 bitmap_clear_range(&zone->bitmap, 0, count);
675 * Initialize the array of frame_t structures.
678 zone->frames = (frame_t *) confdata;
680 for (size_t i = 0; i < count; i++)
681 frame_initialize(&zone->frames[i]);
682 } else {
683 bitmap_initialize(&zone->bitmap, 0, NULL);
684 zone->frames = NULL;
688 /** Compute configuration data size for zone.
690 * @param count Size of zone in frames.
692 * @return Size of zone configuration info (in bytes).
695 size_t zone_conf_size(size_t count)
697 return (count * sizeof(frame_t) + bitmap_size(count));
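/*
 * The configuration area for a zone of count frames is laid out as the array
 * of frame_t records followed immediately by the allocation bitmap, which is
 * exactly how zone_construct() consumes it:
 *
 *     confdata:  [ frame_t[count] ][ bitmap_size(count) bytes ]
 *
 *     zone->frames = (frame_t *) confdata;
 *     bitmap_initialize(&zone->bitmap, count,
 *         confdata + (sizeof(frame_t) * count));
 *
 * The number of physical frames needed to hold it is thus
 * SIZE2FRAMES(zone_conf_size(count)).
 */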
700 /** Allocate external configuration frames from low memory. */
701 pfn_t zone_external_conf_alloc(size_t count)
703 size_t frames = SIZE2FRAMES(zone_conf_size(count));
705 return ADDR2PFN((uintptr_t)
706 frame_alloc(frames, FRAME_LOWMEM | FRAME_ATOMIC, 0));
709 /** Create and add zone to system.
711 * @param start First frame number (absolute).
712 * @param count Size of zone in frames.
713 * @param confframe Where configuration frames are supposed to be.
714 * Automatically checks that we will not disturb the
715 * kernel and possibly init. If confframe is given
716 * _outside_ this zone, it is expected that the area is
717 * already marked BUSY and big enough to contain
718 * zone_conf_size() amount of data. If the confframe is
719 * inside the area, the zone free frame information is
720 * modified not to include it.
722 * @return Zone number or -1 on error.
725 size_t zone_create(pfn_t start, size_t count, pfn_t confframe,
726 zone_flags_t flags)
728 irq_spinlock_lock(&zones.lock, true);
730 if (flags & ZONE_AVAILABLE) { /* Create available zone */
732 * Theoretically we could have NULL here; practically, make sure
733 * nobody tries to do that. If some platform requires it, remove
734 * the assert.
736 assert(confframe != ADDR2PFN((uintptr_t) NULL));
738 /* Update the known end of physical memory. */
739 config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count));
742 * If confframe is supposed to be inside our zone, then make sure
743 * it does not overlap the kernel or the init tasks.
745 size_t confcount = SIZE2FRAMES(zone_conf_size(count));
747 if ((confframe >= start) && (confframe < start + count)) {
748 for (; confframe < start + count; confframe++) {
749 uintptr_t addr = PFN2ADDR(confframe);
750 if (overlaps(addr, PFN2ADDR(confcount),
751 KA2PA(config.base), config.kernel_size))
752 continue;
754 if (overlaps(addr, PFN2ADDR(confcount),
755 KA2PA(ballocs.base), ballocs.size))
756 continue;
758 bool overlap = false;
759 for (size_t i = 0; i < init.cnt; i++) {
760 if (overlaps(addr, PFN2ADDR(confcount),
761 init.tasks[i].paddr,
762 init.tasks[i].size)) {
763 overlap = true;
764 break;
768 if (overlap)
769 continue;
771 break;
774 if (confframe >= start + count)
775 panic("Cannot find configuration data for zone.");
778 size_t znum = zones_insert_zone(start, count, flags);
779 if (znum == (size_t) -1) {
780 irq_spinlock_unlock(&zones.lock, true);
781 return (size_t) -1;
784 void *confdata = (void *) PA2KA(PFN2ADDR(confframe));
785 zone_construct(&zones.info[znum], start, count, flags, confdata);
787 /* If confdata in zone, mark as unavailable */
788 if ((confframe >= start) && (confframe < start + count)) {
789 for (size_t i = confframe; i < confframe + confcount; i++)
790 zone_mark_unavailable(&zones.info[znum],
791 i - zones.info[znum].base);
794 irq_spinlock_unlock(&zones.lock, true);
796 return znum;
799 /* Non-available zone */
800 size_t znum = zones_insert_zone(start, count, flags);
801 if (znum == (size_t) -1) {
802 irq_spinlock_unlock(&zones.lock, true);
803 return (size_t) -1;
806 zone_construct(&zones.info[znum], start, count, flags, NULL);
808 irq_spinlock_unlock(&zones.lock, true);
810 return znum;
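/*
 * Usage sketch (illustrative only; the addresses, sizes and flag combinations
 * are examples, not taken from any particular platform): architecture code
 * either lets zone_create() carve the configuration frames out of the new
 * zone itself, or allocates them from low memory first, e.g. for high memory:
 *
 *     // configuration placed inside the zone (confframe == start):
 *     zone_create(ADDR2PFN(base), SIZE2FRAMES(size), ADDR2PFN(base),
 *         ZONE_AVAILABLE | ZONE_LOWMEM);
 *
 *     // configuration allocated from low memory for a high-memory zone:
 *     pfn_t confframe = zone_external_conf_alloc(SIZE2FRAMES(size));
 *     if (confframe != 0)
 *             zone_create(ADDR2PFN(base), SIZE2FRAMES(size), confframe,
 *                 ZONE_AVAILABLE | ZONE_HIGHMEM);
 */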
814 * Frame functions
817 /** Set parent of frame. */
818 void frame_set_parent(pfn_t pfn, void *data, size_t hint)
820 irq_spinlock_lock(&zones.lock, true);
822 size_t znum = find_zone(pfn, 1, hint);
824 assert(znum != (size_t) -1);
826 zone_get_frame(&zones.info[znum],
827 pfn - zones.info[znum].base)->parent = data;
829 irq_spinlock_unlock(&zones.lock, true);
832 void *frame_get_parent(pfn_t pfn, size_t hint)
834 irq_spinlock_lock(&zones.lock, true);
836 size_t znum = find_zone(pfn, 1, hint);
838 assert(znum != (size_t) -1);
840 void *res = zone_get_frame(&zones.info[znum],
841 pfn - zones.info[znum].base)->parent;
843 irq_spinlock_unlock(&zones.lock, true);
845 return res;
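/*
 * Usage sketch (the descriptor type and variable names are purely
 * illustrative): the parent pointer is an opaque per-frame value stored on
 * behalf of the caller, and the hint is a zone index from a previous lookup:
 *
 *     frame_set_parent(ADDR2PFN(paddr), my_desc, 0);
 *     ...
 *     my_desc_t *desc = (my_desc_t *) frame_get_parent(ADDR2PFN(paddr), 0);
 */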
848 static size_t try_find_zone(size_t count, bool lowmem,
849 pfn_t frame_constraint, size_t hint)
851 if (!lowmem) {
852 size_t znum = find_free_zone(count,
853 ZONE_HIGHMEM | ZONE_AVAILABLE, frame_constraint, hint);
854 if (znum != (size_t) -1)
855 return znum;
858 return find_free_zone(count, ZONE_LOWMEM | ZONE_AVAILABLE,
859 frame_constraint, hint);
862 /** Allocate frames of physical memory.
864 * @param count Number of contiguous frames to allocate.
865 * @param flags Flags for host zone selection and address processing.
866 * @param constraint Indication of physical address bits that cannot be
867 * set in the address of the first allocated frame.
868 * @param pzone Preferred zone.
870 * @return Physical address of the first allocated frame, or zero on failure when FRAME_ATOMIC is set.
873 uintptr_t frame_alloc_generic(size_t count, frame_flags_t flags,
874 uintptr_t constraint, size_t *pzone)
876 assert(count > 0);
878 size_t hint = pzone ? (*pzone) : 0;
879 pfn_t frame_constraint = ADDR2PFN(constraint);
882 * If not told otherwise, we must first reserve the memory.
884 if (!(flags & FRAME_NO_RESERVE))
885 reserve_force_alloc(count);
887 loop:
888 irq_spinlock_lock(&zones.lock, true);
890 // TODO: Print diagnostic if neither is explicitly specified.
891 bool lowmem = (flags & FRAME_LOWMEM) || !(flags & FRAME_HIGHMEM);
894 * First, find suitable frame zone.
896 size_t znum = try_find_zone(count, lowmem, frame_constraint, hint);
899 * If there is no memory, reclaim some slab memory;
900 * if that does not help, reclaim all of it.
902 if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) {
903 irq_spinlock_unlock(&zones.lock, true);
904 size_t freed = slab_reclaim(0);
905 irq_spinlock_lock(&zones.lock, true);
907 if (freed > 0)
908 znum = try_find_zone(count, lowmem,
909 frame_constraint, hint);
911 if (znum == (size_t) -1) {
912 irq_spinlock_unlock(&zones.lock, true);
913 freed = slab_reclaim(SLAB_RECLAIM_ALL);
914 irq_spinlock_lock(&zones.lock, true);
916 if (freed > 0)
917 znum = try_find_zone(count, lowmem,
918 frame_constraint, hint);
922 if (znum == (size_t) -1) {
923 if (flags & FRAME_ATOMIC) {
924 irq_spinlock_unlock(&zones.lock, true);
926 if (!(flags & FRAME_NO_RESERVE))
927 reserve_free(count);
929 return 0;
932 size_t avail = frame_total_free_get_internal();
934 irq_spinlock_unlock(&zones.lock, true);
936 if (!THREAD)
937 panic("Cannot wait for %zu frames to become available "
938 "(%zu available).", count, avail);
941 * Sleep until some frames are available again.
944 #ifdef CONFIG_DEBUG
945 log(LF_OTHER, LVL_DEBUG,
946 "Thread %" PRIu64 " waiting for %zu frames "
947 "%zu available.", THREAD->tid, count, avail);
948 #endif
951 * Since the mem_avail_mtx is an active mutex, we need to
952 * disable interrupts to prevent deadlock with TLB shootdown.
954 ipl_t ipl = interrupts_disable();
955 mutex_lock(&mem_avail_mtx);
957 if (mem_avail_req > 0)
958 mem_avail_req = min(mem_avail_req, count);
959 else
960 mem_avail_req = count;
962 size_t gen = mem_avail_gen;
964 while (gen == mem_avail_gen)
965 condvar_wait(&mem_avail_cv, &mem_avail_mtx);
967 mutex_unlock(&mem_avail_mtx);
968 interrupts_restore(ipl);
970 #ifdef CONFIG_DEBUG
971 log(LF_OTHER, LVL_DEBUG, "Thread %" PRIu64 " woken up.",
972 THREAD->tid);
973 #endif
975 goto loop;
978 pfn_t pfn = zone_frame_alloc(&zones.info[znum], count,
979 frame_constraint) + zones.info[znum].base;
981 irq_spinlock_unlock(&zones.lock, true);
983 if (pzone)
984 *pzone = znum;
986 return PFN2ADDR(pfn);
989 uintptr_t frame_alloc(size_t count, frame_flags_t flags, uintptr_t constraint)
991 return frame_alloc_generic(count, flags, constraint, NULL);
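/*
 * Usage sketch (illustrative): allocate a physically contiguous run of frames
 * and free it again. A zero constraint places no restriction on the address
 * bits of the first frame; with FRAME_ATOMIC the call returns zero instead of
 * sleeping when memory is short:
 *
 *     uintptr_t paddr = frame_alloc(4, FRAME_LOWMEM | FRAME_ATOMIC, 0);
 *     if (paddr != 0) {
 *             // ... use the four frames starting at physical address paddr
 *             frame_free(paddr, 4);
 *     }
 */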
994 /** Free frames of physical memory.
996 * Find respective frame structures for supplied physical frames.
997 * Decrement each frame reference count. If it drops to zero, mark
998 * the frames as available.
1000 * @param start Physical address of the first frame to be freed.
1001 * @param count Number of frames to free.
1002 * @param flags Flags to control memory reservation.
1005 void frame_free_generic(uintptr_t start, size_t count, frame_flags_t flags)
1007 size_t freed = 0;
1009 irq_spinlock_lock(&zones.lock, true);
1011 for (size_t i = 0; i < count; i++) {
1013 * First, find host frame zone for addr.
1015 pfn_t pfn = ADDR2PFN(start) + i;
1016 size_t znum = find_zone(pfn, 1, 0);
1018 assert(znum != (size_t) -1);
1020 freed += zone_frame_free(&zones.info[znum],
1021 pfn - zones.info[znum].base);
1024 irq_spinlock_unlock(&zones.lock, true);
1027 * Signal that some memory has been freed.
1028 * Since the mem_avail_mtx is an active mutex,
1029 * we need to disable interrupts to prevent deadlock
1030 * with TLB shootdown.
1033 ipl_t ipl = interrupts_disable();
1034 mutex_lock(&mem_avail_mtx);
1036 if (mem_avail_req > 0)
1037 mem_avail_req -= min(mem_avail_req, freed);
1039 if (mem_avail_req == 0) {
1040 mem_avail_gen++;
1041 condvar_broadcast(&mem_avail_cv);
1044 mutex_unlock(&mem_avail_mtx);
1045 interrupts_restore(ipl);
1047 if (!(flags & FRAME_NO_RESERVE))
1048 reserve_free(freed);
1051 void frame_free(uintptr_t frame, size_t count)
1053 frame_free_generic(frame, count, 0);
1056 void frame_free_noreserve(uintptr_t frame, size_t count)
1058 frame_free_generic(frame, count, FRAME_NO_RESERVE);
1061 /** Add reference to frame.
1063 * Find respective frame structure for supplied PFN and
1064 * increment frame reference count.
1066 * @param pfn Frame number of the frame to be referenced.
1069 _NO_TRACE void frame_reference_add(pfn_t pfn)
1071 irq_spinlock_lock(&zones.lock, true);
1074 * First, find host frame zone for addr.
1076 size_t znum = find_zone(pfn, 1, 0);
1078 assert(znum != (size_t) -1);
1080 zones.info[znum].frames[pfn - zones.info[znum].base].refcount++;
1082 irq_spinlock_unlock(&zones.lock, true);
1085 /** Mark given range unavailable in frame zones.
1088 _NO_TRACE void frame_mark_unavailable(pfn_t start, size_t count)
1090 irq_spinlock_lock(&zones.lock, true);
1092 for (size_t i = 0; i < count; i++) {
1093 size_t znum = find_zone(start + i, 1, 0);
1095 if (znum == (size_t) -1) /* PFN not found */
1096 continue;
1098 zone_mark_unavailable(&zones.info[znum],
1099 start + i - zones.info[znum].base);
1102 irq_spinlock_unlock(&zones.lock, true);
1105 /** Initialize physical memory management.
1108 void frame_init(void)
1110 if (config.cpu_active == 1) {
1111 zones.count = 0;
1112 irq_spinlock_initialize(&zones.lock, "frame.zones.lock");
1113 mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE);
1114 condvar_initialize(&mem_avail_cv);
1117 /* Tell the architecture to create some memory */
1118 frame_low_arch_init();
1120 if (config.cpu_active == 1) {
1121 frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
1122 SIZE2FRAMES(config.kernel_size));
1124 for (size_t i = 0; i < init.cnt; i++)
1125 frame_mark_unavailable(ADDR2PFN(init.tasks[i].paddr),
1126 SIZE2FRAMES(init.tasks[i].size));
1128 if (ballocs.size)
1129 frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
1130 SIZE2FRAMES(ballocs.size));
1133 * Blacklist the first frame, as allocating NULL would
1134 * fail in some places.
1136 frame_mark_unavailable(0, 1);
1139 frame_high_arch_init();
1142 /** Adjust bounds of physical memory region according to low/high memory split.
1144 * @param low[in] If true, the adjustment is performed to make the region
1145 * fit in the low memory. Otherwise the adjustment is
1146 * performed to make the region fit in the high memory.
1147 * @param basep[inout] Pointer to a variable which contains the region's base
1148 * address and which may receive the adjusted base address.
1149 * @param sizep[inout] Pointer to a variable which contains the region's size
1150 * and which may receive the adjusted size.
1152 * @return True if the region still exists even after the adjustment.
1153 * @return False otherwise.
1156 bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep)
1158 uintptr_t limit = KA2PA(config.identity_base) + config.identity_size;
1160 if (low) {
1161 if (*basep > limit)
1162 return false;
1164 if (*basep + *sizep > limit)
1165 *sizep = limit - *basep;
1166 } else {
1167 if (*basep + *sizep <= limit)
1168 return false;
1170 if (*basep <= limit) {
1171 *sizep -= limit - *basep;
1172 *basep = limit;
1176 return true;
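/*
 * Worked example (with made-up numbers): assume the limit computed above is
 * 0x40000000 and a detected region has base = 0x3c000000, size = 0x10000000.
 *
 *     // low == true:  the region is clipped to [0x3c000000, 0x40000000),
 *     //               i.e. *sizep becomes 0x04000000; returns true.
 *     // low == false: the region is trimmed to [0x40000000, 0x4c000000),
 *     //               i.e. *basep becomes 0x40000000 and *sizep 0x0c000000;
 *     //               returns true.
 *
 * A region lying entirely on the other side of the limit makes the function
 * return false and the caller is expected to skip it.
 */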
1179 /** Return total size of all zones.
1182 uint64_t zones_total_size(void)
1184 irq_spinlock_lock(&zones.lock, true);
1186 uint64_t total = 0;
1188 for (size_t i = 0; i < zones.count; i++)
1189 total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
1191 irq_spinlock_unlock(&zones.lock, true);
1193 return total;
1196 void zones_stats(uint64_t *total, uint64_t *unavail, uint64_t *busy,
1197 uint64_t *free)
1199 assert(total != NULL);
1200 assert(unavail != NULL);
1201 assert(busy != NULL);
1202 assert(free != NULL);
1204 irq_spinlock_lock(&zones.lock, true);
1206 *total = 0;
1207 *unavail = 0;
1208 *busy = 0;
1209 *free = 0;
1211 for (size_t i = 0; i < zones.count; i++) {
1212 *total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
1214 if (zones.info[i].flags & ZONE_AVAILABLE) {
1215 *busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count);
1216 *free += (uint64_t) FRAMES2SIZE(zones.info[i].free_count);
1217 } else
1218 *unavail += (uint64_t) FRAMES2SIZE(zones.info[i].count);
1221 irq_spinlock_unlock(&zones.lock, true);
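/*
 * Usage sketch (illustrative): all four output parameters are mandatory and
 * the values are reported in bytes (converted via FRAMES2SIZE):
 *
 *     uint64_t total, unavail, busy, free;
 *     zones_stats(&total, &unavail, &busy, &free);
 *     printf("%" PRIu64 " bytes total, %" PRIu64 " bytes free\n", total, free);
 */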
1224 /** Prints list of zones.
1227 void zones_print_list(void)
1229 #ifdef __32_BITS__
1230 printf("[nr] [base addr] [frames ] [flags ] [free frames ] [busy frames ]\n");
1231 #endif
1233 #ifdef __64_BITS__
1234 printf("[nr] [base address ] [frames ] [flags ] [free frames ] [busy frames ]\n");
1235 #endif
1238 * Because printing may require allocation of memory, we must not hold
1239 * the frame allocator locks while printing zone statistics. Therefore,
1240 * we simply gather the statistics under the protection of the locks and
1241 * print them once the locks have been released.
1243 * If someone adds or removes zones while we are printing the statistics,
1244 * we may end up with inaccurate output (e.g. a zone missing from
1245 * the listing).
1248 size_t free_lowmem = 0;
1249 size_t free_highmem = 0;
1250 size_t free_highprio = 0;
1252 for (size_t i = 0; ; i++) {
1253 irq_spinlock_lock(&zones.lock, true);
1255 if (i >= zones.count) {
1256 irq_spinlock_unlock(&zones.lock, true);
1257 break;
1260 pfn_t fbase = zones.info[i].base;
1261 uintptr_t base = PFN2ADDR(fbase);
1262 size_t count = zones.info[i].count;
1263 zone_flags_t flags = zones.info[i].flags;
1264 size_t free_count = zones.info[i].free_count;
1265 size_t busy_count = zones.info[i].busy_count;
1267 bool available = ((flags & ZONE_AVAILABLE) != 0);
1268 bool lowmem = ((flags & ZONE_LOWMEM) != 0);
1269 bool highmem = ((flags & ZONE_HIGHMEM) != 0);
1270 bool highprio = is_high_priority(fbase, count);
1272 if (available) {
1273 if (lowmem)
1274 free_lowmem += free_count;
1276 if (highmem)
1277 free_highmem += free_count;
1279 if (highprio) {
1280 free_highprio += free_count;
1281 } else {
1283 * Walk all frames of the zone and examine
1284 * all high priority memory to get accurate
1285 * statistics.
1288 for (size_t index = 0; index < count; index++) {
1289 if (is_high_priority(fbase + index, 0)) {
1290 if (!bitmap_get(&zones.info[i].bitmap, index))
1291 free_highprio++;
1292 } else
1293 break;
1298 irq_spinlock_unlock(&zones.lock, true);
1300 printf("%-4zu", i);
1302 #ifdef __32_BITS__
1303 printf(" %p", (void *) base);
1304 #endif
1306 #ifdef __64_BITS__
1307 printf(" %p", (void *) base);
1308 #endif
1310 printf(" %12zu %c%c%c%c%c ", count,
1311 available ? 'A' : '-',
1312 (flags & ZONE_RESERVED) ? 'R' : '-',
1313 (flags & ZONE_FIRMWARE) ? 'F' : '-',
1314 (flags & ZONE_LOWMEM) ? 'L' : '-',
1315 (flags & ZONE_HIGHMEM) ? 'H' : '-');
1317 if (available)
1318 printf("%14zu %14zu",
1319 free_count, busy_count);
1321 printf("\n");
1324 printf("\n");
1326 uint64_t size;
1327 const char *size_suffix;
1329 bin_order_suffix(FRAMES2SIZE(free_lowmem), &size, &size_suffix,
1330 false);
1331 printf("Available low memory: %zu frames (%" PRIu64 " %s)\n",
1332 free_lowmem, size, size_suffix);
1334 bin_order_suffix(FRAMES2SIZE(free_highmem), &size, &size_suffix,
1335 false);
1336 printf("Available high memory: %zu frames (%" PRIu64 " %s)\n",
1337 free_highmem, size, size_suffix);
1339 bin_order_suffix(FRAMES2SIZE(free_highprio), &size, &size_suffix,
1340 false);
1341 printf("Available high priority: %zu frames (%" PRIu64 " %s)\n",
1342 free_highprio, size, size_suffix);
1345 /** Prints zone details.
1347 * @param num Zone base address or zone number.
1350 void zone_print_one(size_t num)
1352 irq_spinlock_lock(&zones.lock, true);
1353 size_t znum = (size_t) -1;
1355 for (size_t i = 0; i < zones.count; i++) {
1356 if ((i == num) || (PFN2ADDR(zones.info[i].base) == num)) {
1357 znum = i;
1358 break;
1362 if (znum == (size_t) -1) {
1363 irq_spinlock_unlock(&zones.lock, true);
1364 printf("Zone not found.\n");
1365 return;
1368 size_t free_lowmem = 0;
1369 size_t free_highmem = 0;
1370 size_t free_highprio = 0;
1372 pfn_t fbase = zones.info[znum].base;
1373 uintptr_t base = PFN2ADDR(fbase);
1374 zone_flags_t flags = zones.info[znum].flags;
1375 size_t count = zones.info[znum].count;
1376 size_t free_count = zones.info[znum].free_count;
1377 size_t busy_count = zones.info[znum].busy_count;
1379 bool available = ((flags & ZONE_AVAILABLE) != 0);
1380 bool lowmem = ((flags & ZONE_LOWMEM) != 0);
1381 bool highmem = ((flags & ZONE_HIGHMEM) != 0);
1382 bool highprio = is_high_priority(fbase, count);
1384 if (available) {
1385 if (lowmem)
1386 free_lowmem = free_count;
1388 if (highmem)
1389 free_highmem = free_count;
1391 if (highprio) {
1392 free_highprio = free_count;
1393 } else {
1395 * Walk all frames of the zone and examine
1396 * all high priority memory to get accurate
1397 * statistics.
1400 for (size_t index = 0; index < count; index++) {
1401 if (is_high_priority(fbase + index, 0)) {
1402 if (!bitmap_get(&zones.info[znum].bitmap, index))
1403 free_highprio++;
1404 } else
1405 break;
1410 irq_spinlock_unlock(&zones.lock, true);
1412 uint64_t size;
1413 const char *size_suffix;
1415 bin_order_suffix(FRAMES2SIZE(count), &size, &size_suffix, false);
1417 printf("Zone number: %zu\n", znum);
1418 printf("Zone base address: %p\n", (void *) base);
1419 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count,
1420 size, size_suffix);
1421 printf("Zone flags: %c%c%c%c%c\n",
1422 available ? 'A' : '-',
1423 (flags & ZONE_RESERVED) ? 'R' : '-',
1424 (flags & ZONE_FIRMWARE) ? 'F' : '-',
1425 (flags & ZONE_LOWMEM) ? 'L' : '-',
1426 (flags & ZONE_HIGHMEM) ? 'H' : '-');
1428 if (available) {
1429 bin_order_suffix(FRAMES2SIZE(busy_count), &size, &size_suffix,
1430 false);
1431 printf("Allocated space: %zu frames (%" PRIu64 " %s)\n",
1432 busy_count, size, size_suffix);
1434 bin_order_suffix(FRAMES2SIZE(free_count), &size, &size_suffix,
1435 false);
1436 printf("Available space: %zu frames (%" PRIu64 " %s)\n",
1437 free_count, size, size_suffix);
1439 bin_order_suffix(FRAMES2SIZE(free_lowmem), &size, &size_suffix,
1440 false);
1441 printf("Available low memory: %zu frames (%" PRIu64 " %s)\n",
1442 free_lowmem, size, size_suffix);
1444 bin_order_suffix(FRAMES2SIZE(free_highmem), &size, &size_suffix,
1445 false);
1446 printf("Available high memory: %zu frames (%" PRIu64 " %s)\n",
1447 free_highmem, size, size_suffix);
1449 bin_order_suffix(FRAMES2SIZE(free_highprio), &size, &size_suffix,
1450 false);
1451 printf("Available high priority: %zu frames (%" PRIu64 " %s)\n",
1452 free_highprio, size, size_suffix);
1456 /** @}