/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and create, resize and share
 * address space areas.
 */
#include <arch/mm/as.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <arch/mm/asid.h>
#include <preemption.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>
#include <interrupt.h>
/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;
89 /** Slab for as_t objects.
92 static slab_cache_t
*as_slab
;
/** ASID subsystem lock.
 *
 * This lock protects:
 * - inactive_as_with_asid_list
 * - as->asid for each as of the as_t type
 * - asids_allocated counter
 *
 */
SPINLOCK_INITIALIZE(asidlock);
/**
 * Inactive address spaces (on all processors)
 * that have valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_list);
/** Kernel address space. */
as_t *AS_KERNEL = NULL;
NO_TRACE static int as_constructor(void *obj, unsigned int flags)
{
    as_t *as = (as_t *) obj;

    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock, MUTEX_PASSIVE);

    return as_constructor_arch(as, flags);
}
NO_TRACE static size_t as_destructor(void *obj)
{
    return as_destructor_arch((as_t *) obj);
}
/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();

    as_slab = slab_cache_create("as_t", sizeof(as_t), 0,
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("Cannot create kernel address space.");

    /*
     * Make sure the kernel address space
     * reference count never drops to zero.
     */
    as_hold(AS_KERNEL);
}
/** Create address space.
 *
 * @param flags Flags that influence the way in which the address
 *              space is created.
 *
 */
as_t *as_create(unsigned int flags)
{
    as_t *as = (as_t *) slab_alloc(as_slab, 0);
    (void) as_create_arch(as, 0);

    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    atomic_set(&as->refcount, 0);
    as->cpu_refcount = 0;

#ifdef AS_PAGE_TABLE
    as->genarch.page_table = page_table_create(flags);
#else
    page_table_create(flags);
#endif

    return as;
}
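
/*
 * Illustrative sketch (not a quote of existing callers): task creation code
 * would obtain a fresh userspace address space with
 *
 *	as_t *as = as_create(0);
 *
 * whereas as_init() above passes FLAG_AS_KERNEL so that the kernel address
 * space keeps ASID_KERNEL instead of having an ASID allocated on its first
 * as_switch().
 */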
/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 *
 * We know that we don't hold any spinlock.
 *
 * @param as Address space to be destroyed.
 *
 */
void as_destroy(as_t *as)
{
    DEADLOCK_PROBE_INIT(p_asidlock);

    ASSERT(atomic_get(&as->refcount) == 0);

    /*
     * Since there is no reference to this address space, it is safe not to
     * lock its mutex.
     */

    /*
     * We need to avoid deadlock between TLB shootdown and asidlock.
     * We therefore try to take asid conditionally and if we don't succeed,
     * we enable interrupts and try again. This is done while preemption is
     * disabled to prevent nested context switches. We also depend on the
     * fact that so far no spinlocks are held.
     */
    preemption_disable();
    ipl_t ipl = interrupts_read();

retry:
    interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }

    /* Interrupts disabled, enable preemption */
    preemption_enable();

    if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
        if (as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);

        asid_put(as->asid);
    }

    spinlock_unlock(&asidlock);
    interrupts_restore(ipl);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    bool cond = true;
    while (cond) {
        ASSERT(!list_empty(&as->as_area_btree.leaf_list));

        btree_node_t *node =
            list_get_instance(list_first(&as->as_area_btree.leaf_list),
            btree_node_t, leaf_link);

        if ((cond = node->keys))
            as_area_destroy(as, node->key[0]);
    }

    btree_destroy(&as->as_area_btree);

#ifdef AS_PAGE_TABLE
    page_table_destroy(as->genarch.page_table);
#else
    page_table_destroy(NULL);
#endif

    slab_free(as_slab, as);
}
/** Hold a reference to an address space.
 *
 * Holding a reference to an address space prevents destruction
 * of that address space.
 *
 * @param as Address space to be held.
 *
 */
NO_TRACE void as_hold(as_t *as)
{
    atomic_inc(&as->refcount);
}
/** Release a reference to an address space.
 *
 * The last one to release a reference to an address space
 * destroys the address space.
 *
 * @param as Address space to be released.
 *
 */
NO_TRACE void as_release(as_t *as)
{
    if (atomic_predec(&as->refcount) == 0)
        as_destroy(as);
}
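
/*
 * Illustrative pairing of the two helpers above (the task pointer is a
 * hypothetical example, not existing code):
 *
 *	as_hold(task->as);      // prevent destruction while the as is in use
 *	...
 *	as_release(task->as);   // the last release calls as_destroy()
 */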
/** Check area conflicts with other areas.
 *
 * @param as      Address space.
 * @param addr    Starting virtual address of the area being tested.
 * @param count   Number of pages in the area being tested.
 * @param guarded True if the area being tested is protected by guard pages.
 * @param avoid   Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 *
 */
NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
    size_t count, bool guarded, as_area_t *avoid)
{
    ASSERT((addr % PAGE_SIZE) == 0);
    ASSERT(mutex_locked(&as->lock));

    /*
     * If the addition of the supposed area address and size overflows,
     * report conflict.
     */
    if (overflows_into_positive(addr, P2SZ(count)))
        return false;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */
    btree_node_t *leaf;
    as_area_t *area =
        (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
    if (area) {
        if (area != avoid)
            return false;
    }

    /* First, check the two border cases. */
    btree_node_t *node =
        btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
    if (node) {
        area = (as_area_t *) node->value[node->keys - 1];

        if (area != avoid) {
            mutex_lock(&area->lock);

            /*
             * If at least one of the two areas are protected
             * by the AS_AREA_GUARD flag then we must be sure
             * that they are separated by at least one unmapped
             * page.
             */
            int const gp = (guarded ||
                (area->flags & AS_AREA_GUARD)) ? 1 : 0;

            /*
             * The area comes from the left neighbour node, which
             * means that there already are some areas in the leaf
             * node, which in turn means that adding gp is safe and
             * will not cause an integer overflow.
             */
            if (overlaps(addr, P2SZ(count), area->base,
                P2SZ(area->pages + gp))) {
                mutex_unlock(&area->lock);
                return false;
            }

            mutex_unlock(&area->lock);
        }
    }

    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
    if (node) {
        area = (as_area_t *) node->value[0];

        if (area != avoid) {
            int gp;

            mutex_lock(&area->lock);

            gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
            if (gp && overflows(addr, P2SZ(count))) {
                /*
                 * Guard page not needed if the supposed area
                 * is adjacent to the end of the address space.
                 * We already know that the following test is
                 * going to fail...
                 */
                gp--;
            }

            if (overlaps(addr, P2SZ(count + gp), area->base,
                P2SZ(area->pages))) {
                mutex_unlock(&area->lock);
                return false;
            }

            mutex_unlock(&area->lock);
        }
    }

    /* Second, check the leaf node. */
    btree_key_t i;
    for (i = 0; i < leaf->keys; i++) {
        area = (as_area_t *) leaf->value[i];
        int agp;
        int gp;

        if (area == avoid)
            continue;

        mutex_lock(&area->lock);

        gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
        agp = gp;

        /*
         * Sanitize the two possible unsigned integer overflows.
         */
        if (gp && overflows(addr, P2SZ(count)))
            gp--;
        if (agp && overflows(area->base, P2SZ(area->pages)))
            agp--;

        if (overlaps(addr, P2SZ(count + gp), area->base,
            P2SZ(area->pages + agp))) {
            mutex_unlock(&area->lock);
            return false;
        }

        mutex_unlock(&area->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check if it is contained in the user address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return iswithin(USER_ADDRESS_SPACE_START,
            (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
            addr, P2SZ(count));
    }

    return true;
}
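
/*
 * Worked example of the guard-page arithmetic above (numbers are assumptions,
 * PAGE_SIZE == 4 KiB): an existing area at base 0x10000 with pages == 2 and
 * AS_AREA_GUARD set makes gp == 1, so the conflict test effectively treats it
 * as occupying 0x10000-0x12fff. A new two-page area at addr == 0x12000
 * therefore conflicts, while one at 0x13000 does not, leaving the required
 * unmapped guard page in between.
 */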
/** Return pointer to unmapped address space area
 *
 * The address space must be already locked when calling
 * this function.
 *
 * @param as      Address space.
 * @param bound   Lowest address bound.
 * @param size    Requested size of the allocation.
 * @param guarded True if the allocation must be protected by guard pages.
 *
 * @return Address of the beginning of unmapped address space area.
 * @return -1 if no suitable address space area was found.
 *
 */
NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
    size_t size, bool guarded)
{
    ASSERT(mutex_locked(&as->lock));

    if (size == 0)
        return (uintptr_t) -1;

    /*
     * Make sure we allocate from page-aligned
     * address. Check for possible overflow in
     * each step.
     */

    size_t pages = SIZE2FRAMES(size);

    /*
     * Find the lowest unmapped address aligned on the size
     * boundary, not smaller than bound and of the required size.
     */

    /* First check the bound address itself */
    uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
    if (addr >= bound) {
        if (guarded) {
            /* Leave an unmapped page between the lower
             * bound and the area's start address.
             */
            addr += P2SZ(1);
        }

        if (check_area_conflicts(as, addr, pages, guarded, NULL))
            return addr;
    }

    /* Eventually check the addresses behind each area */
    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {
        for (btree_key_t i = 0; i < node->keys; i++) {
            as_area_t *area = (as_area_t *) node->value[i];

            mutex_lock(&area->lock);

            addr =
                ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);

            if (guarded || area->flags & AS_AREA_GUARD) {
                /* We must leave an unmapped page
                 * between the two areas.
                 */
                addr += P2SZ(1);
            }

            bool avail =
                ((addr >= bound) && (addr >= area->base) &&
                (check_area_conflicts(as, addr, pages, guarded, area)));

            mutex_unlock(&area->lock);

            if (avail)
                return addr;
        }
    }

    /* No suitable address space area found */
    return (uintptr_t) -1;
}
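
/*
 * Illustrative example (assumed numbers, PAGE_SIZE == 4 KiB): a call
 * as_get_unmapped_area(as, 0x1234, 0x2000, false) first rounds the bound up
 * to addr == 0x2000 and tests SIZE2FRAMES(0x2000) == 2 pages for conflicts;
 * if the bound address is already taken, the search continues right behind
 * each existing area found in the B+tree.
 */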
/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 *
 */
NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
{
    bool dealloc = false;

    mutex_lock(&sh_info->lock);
    ASSERT(sh_info->refcount);

    if (--sh_info->refcount == 0) {
        dealloc = true;

        /*
         * Now walk carefully the pagemap B+tree and free/remove
         * reference from all frames found there.
         */
        list_foreach(sh_info->pagemap.leaf_list, leaf_link,
            btree_node_t, node) {
            btree_key_t i;

            for (i = 0; i < node->keys; i++)
                frame_free((uintptr_t) node->value[i], 1);
        }
    }
    mutex_unlock(&sh_info->lock);

    if (dealloc) {
        if (sh_info->backend && sh_info->backend->destroy_shared_data) {
            sh_info->backend->destroy_shared_data(
                sh_info->backend_shared_data);
        }
        btree_destroy(&sh_info->pagemap);
        free(sh_info);
    }
}
/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as           Target address space.
 * @param flags        Flags of the area memory.
 * @param size         Size of area.
 * @param attrs        Attributes of the area.
 * @param backend      Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to custom backend data.
 * @param base         Starting virtual address of the area.
 *                     If set to AS_AREA_ANY, a suitable mappable area is
 *                     found.
 * @param bound        Lowest address bound if base is set to AS_AREA_ANY.
 *                     Otherwise ignored.
 *
 * @return Address space area on success or NULL on failure.
 *
 */
as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
    unsigned int attrs, mem_backend_t *backend,
    mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
{
    if ((*base != (uintptr_t) AS_AREA_ANY) && !IS_ALIGNED(*base, PAGE_SIZE))
        return NULL;

    if (size == 0)
        return NULL;

    size_t pages = SIZE2FRAMES(size);

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    bool const guarded = flags & AS_AREA_GUARD;

    mutex_lock(&as->lock);

    if (*base == (uintptr_t) AS_AREA_ANY) {
        *base = as_get_unmapped_area(as, bound, size, guarded);
        if (*base == (uintptr_t) -1) {
            mutex_unlock(&as->lock);
            return NULL;
        }
    }

    if (overflows_into_positive(*base, size)) {
        mutex_unlock(&as->lock);
        return NULL;
    }

    if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
        mutex_unlock(&as->lock);
        return NULL;
    }

    as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&area->lock, MUTEX_PASSIVE);

    area->as = as;
    area->flags = flags;
    area->attributes = attrs;
    area->pages = pages;
    area->resident = 0;
    area->base = *base;
    area->backend = backend;
    area->sh_info = NULL;

    if (backend_data)
        area->backend_data = *backend_data;
    else
        memsetb(&area->backend_data, sizeof(area->backend_data), 0);

    share_info_t *si = NULL;

    /*
     * Create the sharing info structure.
     * We do this in advance for every new area, even if it is not going
     * to be shared.
     */
    if (!(attrs & AS_AREA_ATTR_PARTIAL)) {
        si = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&si->lock, MUTEX_PASSIVE);
        si->refcount = 1;
        si->shared = false;
        si->backend_shared_data = NULL;
        si->backend = backend;
        btree_create(&si->pagemap);

        area->sh_info = si;

        if (area->backend && area->backend->create_shared_data) {
            if (!area->backend->create_shared_data(area)) {
                free(area);
                mutex_unlock(&as->lock);
                sh_info_remove_reference(si);
                return NULL;
            }
        }
    }

    if (area->backend && area->backend->create) {
        if (!area->backend->create(area)) {
            free(area);
            mutex_unlock(&as->lock);
            if (!(attrs & AS_AREA_ATTR_PARTIAL))
                sh_info_remove_reference(si);
            return NULL;
        }
    }

    btree_create(&area->used_space);
    btree_insert(&as->as_area_btree, *base, (void *) area,
        NULL);

    mutex_unlock(&as->lock);

    return area;
}
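
/*
 * Illustrative usage sketch (the flag combination and size are assumptions
 * made for this example): mapping a fresh anonymous, guarded data area
 * anywhere in the current address space would look roughly like
 *
 *	uintptr_t virt = (uintptr_t) AS_AREA_ANY;
 *	as_area_t *area = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE |
 *	    AS_AREA_CACHEABLE | AS_AREA_GUARD, 4 * PAGE_SIZE,
 *	    AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);
 *
 * On success virt holds the chosen base address; this is essentially what
 * sys_as_area_create() at the end of this file does for userspace callers.
 */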
/** Find address space area and lock it.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or
 *         NULL on failure.
 *
 */
NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    ASSERT(mutex_locked(&as->lock));

    btree_node_t *leaf;
    as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va,
        &leaf);
    if (area) {
        /* va is the base address of an address space area */
        mutex_lock(&area->lock);
        return area;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    btree_key_t i;

    for (i = 0; i < leaf->keys; i++) {
        area = (as_area_t *) leaf->value[i];

        mutex_lock(&area->lock);

        if ((area->base <= va) &&
            (va <= area->base + (P2SZ(area->pages) - 1)))
            return area;

        mutex_unlock(&area->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree,
        leaf);
    if (lnode) {
        area = (as_area_t *) lnode->value[lnode->keys - 1];

        mutex_lock(&area->lock);

        if (va <= area->base + (P2SZ(area->pages) - 1))
            return area;

        mutex_unlock(&area->lock);
    }

    return NULL;
}
/** Find address space area and change it.
 *
 * @param as      Address space.
 * @param address Virtual address belonging to the area to be changed.
 *                Must be page-aligned.
 * @param size    New size of the virtual memory block starting at
 *                address.
 * @param flags   Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 *
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
{
    if (!IS_ALIGNED(address, PAGE_SIZE))
        return EINVAL;

    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    as_area_t *area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        return ENOENT;
    }

    if (!area->backend->is_resizable(area)) {
        /*
         * The backend does not support resizing for this area.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return ENOTSUP;
    }

    mutex_lock(&area->sh_info->lock);
    if (area->sh_info->shared) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->sh_info->lock);
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return ENOTSUP;
    }
    mutex_unlock(&area->sh_info->lock);

    size_t pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return EPERM;
    }

    if (pages < area->pages) {
        uintptr_t start_free = area->base + P2SZ(pages);

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        page_table_lock(as, false);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        bool cond = true;
        while (cond) {
            ASSERT(!list_empty(&area->used_space.leaf_list));

            btree_node_t *node =
                list_get_instance(list_last(&area->used_space.leaf_list),
                btree_node_t, leaf_link);

            if ((cond = (bool) node->keys)) {
                uintptr_t ptr = node->key[node->keys - 1];
                size_t node_size =
                    (size_t) node->value[node->keys - 1];
                size_t i = 0;

                if (overlaps(ptr, P2SZ(node_size), area->base,
                    P2SZ(pages))) {

                    if (ptr + P2SZ(node_size) <= start_free) {
                        /*
                         * The whole interval fits
                         * completely in the resized
                         * address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval corresponding
                     * to b and c overlaps with the resized
                     * address space area.
                     */

                    /* We are almost done */
                    cond = false;
                    i = (start_free - ptr) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free,
                        node_size - i))
                        panic("Cannot remove used space.");
                } else {
                    /*
                     * The interval of used space can be
                     * completely removed.
                     */
                    if (!used_space_remove(area, ptr, node_size))
                        panic("Cannot remove used space.");
                }

                /*
                 * Start TLB shootdown sequence.
                 *
                 * The sequence is rather short and can be
                 * repeated multiple times. The reason is that
                 * we don't want to have used_space_remove()
                 * inside the sequence as it may use a blocking
                 * memory allocation for its B+tree. Blocking
                 * while holding the tlblock spinlock is
                 * forbidden and would hit a kernel assertion.
                 */
                ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES,
                    as->asid, area->base + P2SZ(pages),
                    area->pages - pages);

                for (; i < node_size; i++) {
                    pte_t *pte = page_mapping_find(as,
                        ptr + P2SZ(i), false);

                    ASSERT(pte);
                    ASSERT(PTE_VALID(pte));
                    ASSERT(PTE_PRESENT(pte));

                    if ((area->backend) &&
                        (area->backend->frame_free)) {
                        area->backend->frame_free(area,
                            ptr + P2SZ(i),
                            PTE_GET_FRAME(pte));
                    }

                    page_mapping_remove(as, ptr + P2SZ(i));
                }

                /*
                 * Finish TLB shootdown sequence.
                 */

                tlb_invalidate_pages(as->asid,
                    area->base + P2SZ(pages),
                    area->pages - pages);

                /*
                 * Invalidate software translation caches
                 * (e.g. TSB on sparc64, PHT on ppc32).
                 */
                as_invalidate_translation_cache(as,
                    area->base + P2SZ(pages),
                    area->pages - pages);
                tlb_shootdown_finalize(ipl);
            }
        }
        page_table_unlock(as, false);
    } else {
        /*
         * Growing the area.
         */

        if (overflows_into_positive(address, P2SZ(pages)))
            return EINVAL;

        /*
         * Check for overlaps with other address space areas.
         */
        bool const guarded = area->flags & AS_AREA_GUARD;
        if (!check_area_conflicts(as, address, pages, guarded, area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            return EADDRNOTAVAIL;
        }
    }

    if (area->backend && area->backend->resize) {
        if (!area->backend->resize(area, pages)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            return ENOMEM;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);

    return 0;
}
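
/*
 * Illustrative call (the address is an assumption for the example): shrinking
 * an area that starts at 0x40000000 down to a single page:
 *
 *	int rc = as_area_resize(AS, 0x40000000, PAGE_SIZE, 0);
 *
 * A non-zero rc reports the failure reason, e.g. ENOTSUP when the backend is
 * not resizable or the area is shared, EPERM when the new size would be zero.
 * Note that size is measured from the area base, so the new page count is
 * SIZE2FRAMES((address - area->base) + size).
 */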
/** Destroy address space area.
 *
 * @param as      Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 *
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
    mutex_lock(&as->lock);

    as_area_t *area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        return ENOENT;
    }

    if (area->backend && area->backend->destroy)
        area->backend->destroy(area);

    uintptr_t base = area->base;

    page_table_lock(as, false);

    /*
     * Start TLB shootdown sequence.
     */
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
        area->pages);

    /*
     * Visit only the pages mapped by used_space B+tree.
     */
    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
        node) {
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                pte_t *pte = page_mapping_find(as,
                    ptr + P2SZ(size), false);

                ASSERT(pte);
                ASSERT(PTE_VALID(pte));
                ASSERT(PTE_PRESENT(pte));

                if ((area->backend) &&
                    (area->backend->frame_free)) {
                    area->backend->frame_free(area,
                        ptr + P2SZ(size),
                        PTE_GET_FRAME(pte));
                }

                page_mapping_remove(as, ptr + P2SZ(size));
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);

    /*
     * Invalidate potential software translation caches
     * (e.g. TSB on sparc64, PHT on ppc32).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize(ipl);

    page_table_unlock(as, false);

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    return 0;
}
/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as         Pointer to source address space.
 * @param src_base       Base address of the source address space area.
 * @param acc_size       Expected size of the source area.
 * @param dst_as         Pointer to destination address space.
 * @param dst_flags_mask Destination address space area flags mask.
 * @param dst_base       Target base address. If set to -1,
 *                       a suitable mappable area is found.
 * @param bound          Lowest address bound if dst_base is set to -1.
 *                       Otherwise ignored.
 *
 * @return Zero on success.
 * @return ENOENT if there is no such task or such address space.
 * @return EPERM if there was a problem in accepting the area.
 * @return ENOMEM if there was a problem in allocating destination
 *         address space area.
 * @return ENOTSUP if the address space area backend does not support
 *         sharing.
 *
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
    uintptr_t bound)
{
    mutex_lock(&src_as->lock);
    as_area_t *src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        return ENOENT;
    }

    if (!src_area->backend->is_shareable(src_area)) {
        /*
         * The backend does not permit sharing of this area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        return ENOTSUP;
    }

    size_t src_size = P2SZ(src_area->pages);
    unsigned int src_flags = src_area->flags;
    mem_backend_t *src_backend = src_area->backend;
    mem_backend_data_t src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if ((src_size != acc_size) ||
        ((src_flags & dst_flags_mask) != dst_flags_mask)) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        return EPERM;
    }

    /*
     * Now we are committed to sharing the area.
     * First, prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    share_info_t *sh_info = src_area->sh_info;

    mutex_lock(&sh_info->lock);
    sh_info->refcount++;
    bool shared = sh_info->shared;
    sh_info->shared = true;
    mutex_unlock(&sh_info->lock);

    if (!shared) {
        /*
         * Call the backend to setup sharing.
         * This only happens once for each sh_info.
         */
        src_area->backend->share(src_area);
    }

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
        src_size, AS_AREA_ATTR_PARTIAL, src_backend,
        &src_backend_data, dst_base, bound);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);

        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_as->lock);
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);

    return 0;
}
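
/*
 * Illustrative sketch (the task pointers and flag mask are assumptions for
 * the example, not a quote of the IPC code): mapping a source area into a
 * receiving task at a kernel-chosen address would look roughly like
 *
 *	uintptr_t dst = (uintptr_t) -1;
 *	int rc = as_area_share(sender->as, src_base, size,
 *	    receiver->as, AS_AREA_READ | AS_AREA_CACHEABLE, &dst, 0);
 *
 * After a successful call both areas reference the same sh_info, and page
 * faults on either side are resolved by the backend from the common pagemap.
 */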
/** Check access mode for address space area.
 *
 * @param area   Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true
 *         otherwise.
 *
 */
NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
{
    ASSERT(mutex_locked(&area->lock));

    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 *
 */
NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags)
{
    unsigned int flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}
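
/*
 * Example worked out from the translation above: an area created with
 * AS_AREA_READ | AS_AREA_EXEC | AS_AREA_CACHEABLE inserts its mappings with
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_EXEC | PAGE_CACHEABLE, while
 * AS_AREA_WRITE would additionally set PAGE_WRITE.
 */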
/** Change address space area flags.
 *
 * The idea is to have the same data, but with a different access mode.
 * This is needed e.g. for writing code into memory and then executing it.
 * In order for this to work properly, this may copy the data
 * into private anonymous memory (unless it's already there).
 *
 * @param as      Address space.
 * @param flags   Flags of the area memory.
 * @param address Address within the area to be changed.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 *
 */
int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address)
{
    /* Flags for the new memory mapping */
    unsigned int page_flags = area_flags_to_page_flags(flags);

    mutex_lock(&as->lock);

    as_area_t *area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        return ENOENT;
    }

    if (area->backend != &anon_backend) {
        /* Copying non-anonymous memory not supported yet */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return ENOTSUP;
    }

    mutex_lock(&area->sh_info->lock);
    if (area->sh_info->shared) {
        /* Copying shared areas not supported yet */
        mutex_unlock(&area->sh_info->lock);
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return ENOTSUP;
    }
    mutex_unlock(&area->sh_info->lock);

    /*
     * Compute total number of used pages in the used_space B+tree
     */
    size_t used_pages = 0;

    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
        node) {
        btree_key_t i;

        for (i = 0; i < node->keys; i++)
            used_pages += (size_t) node->value[i];
    }

    /* An array for storing frame numbers */
    uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);

    page_table_lock(as, false);

    /*
     * Start TLB shootdown sequence.
     */
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
        area->pages);

    /*
     * Remove used pages from page tables and remember their frame
     * numbers.
     */
    size_t frame_idx = 0;

    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
        node) {
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                pte_t *pte = page_mapping_find(as,
                    ptr + P2SZ(size), false);

                ASSERT(pte);
                ASSERT(PTE_VALID(pte));
                ASSERT(PTE_PRESENT(pte));

                old_frame[frame_idx++] = PTE_GET_FRAME(pte);

                /* Remove old mapping */
                page_mapping_remove(as, ptr + P2SZ(size));
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);

    /*
     * Invalidate potential software translation caches
     * (e.g. TSB on sparc64, PHT on ppc32).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize(ipl);

    page_table_unlock(as, false);

    /*
     * Set the new flags.
     */
    area->flags = flags;

    /*
     * Map pages back in with new flags. This step is kept separate
     * so that the memory area could not be accessed with both the old and
     * the new flags at once.
     */
    frame_idx = 0;

    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
        node) {
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                page_table_lock(as, false);

                /* Insert the new mapping */
                page_mapping_insert(as, ptr + P2SZ(size),
                    old_frame[frame_idx++], page_flags);

                page_table_unlock(as, false);
            }
        }
    }

    free(old_frame);

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);

    return 0;
}
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides whether the page fault
 * can be resolved by any backend and if so, it invokes the backend to resolve
 * the page fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param address Faulting address.
 * @param access  Access mode that caused the page fault (i.e.
 *                read/write/exec).
 * @param istate  Pointer to the interrupted state.
 *
 * @return AS_PF_FAULT on page fault.
 * @return AS_PF_OK on success.
 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace()
 *         or copy_from_uspace().
 *
 */
int as_page_fault(uintptr_t address, pf_access_t access, istate_t *istate)
{
    uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);
    int rc = AS_PF_FAULT;

    if (!THREAD)
        goto page_fault;

    if (!AS)
        goto page_fault;

    mutex_lock(&AS->lock);
    as_area_t *area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if ((!area->backend) || (!area->backend->page_fault)) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults on the same address,
     * we need to make sure the mapping has not been already inserted.
     */
    pte_t *pte;
    if ((pte = page_mapping_find(AS, page, false))) {
        if (PTE_PRESENT(pte)) {
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                page_table_unlock(AS, false);
                mutex_unlock(&area->lock);
                mutex_unlock(&AS->lock);
                return AS_PF_OK;
            }
        }
    }

    /*
     * Resort to the backend page fault handler.
     */
    rc = area->backend->page_fault(area, page, access);
    if (rc != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_to_uspace_failover_address);
    } else if (rc == AS_PF_SILENT) {
        printf("Killing task %" PRIu64 " due to a "
            "failed late reservation request.\n", TASK->taskid);
        task_kill_self(true);
    } else {
        fault_if_from_uspace(istate, "Page fault: %p.", (void *) address);
        panic_memtrap(istate, access, address, NULL);
    }

    return AS_PF_DEFER;
}
/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
 * thing which is forbidden in this context is locking the address space.
 *
 * When this function is entered, no spinlocks may be held.
 *
 * @param old_as Old address space or NULL.
 * @param new_as New address space.
 *
 */
void as_switch(as_t *old_as, as_t *new_as)
{
    DEADLOCK_PROBE_INIT(p_asidlock);
    preemption_disable();

retry:
    (void) interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        /*
         * Avoid deadlock with TLB shootdown.
         * We can enable interrupts here because
         * preemption is disabled. We should not be
         * holding any other lock.
         */
        (void) interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }
    preemption_enable();

    /*
     * First, take care of the old address space.
     */
    if (old_as) {
        ASSERT(old_as->cpu_refcount);

        if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old_as->asid != ASID_INVALID);

            list_append(&old_as->inactive_as_with_asid_link,
                &inactive_as_with_asid_list);
        }

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old_as);
    }

    /*
     * Second, prepare the new address space.
     */
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
        if (new_as->asid != ASID_INVALID)
            list_remove(&new_as->inactive_as_with_asid_link);
        else
            new_as->asid = asid_get();
    }

#ifdef AS_PAGE_TABLE
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new_as);

    spinlock_unlock(&asidlock);

    AS = new_as;
}
/** Compute flags for virtual address translation subsystem.
 *
 * @param area Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 *
 */
NO_TRACE unsigned int as_area_get_flags(as_area_t *area)
{
    ASSERT(mutex_locked(&area->lock));

    return area_flags_to_page_flags(area->flags);
}
/** Create page table.
 *
 * Depending on architecture, create either address space private or global
 * page table.
 *
 * @param flags Flags saying whether the page table is for the kernel
 *              address space.
 *
 * @return First entry of the page table.
 *
 */
NO_TRACE pte_t *page_table_create(unsigned int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}
/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 *
 */
NO_TRACE void page_table_destroy(pte_t *page_table)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
}
/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as   Address space.
 * @param lock If false, do not attempt to lock as->lock.
 *
 */
NO_TRACE void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}
1657 /** Unlock page table.
1659 * @param as Address space.
1660 * @param unlock If false, do not attempt to unlock as->lock.
1663 NO_TRACE
void page_table_unlock(as_t
*as
, bool unlock
)
1665 ASSERT(as_operations
);
1666 ASSERT(as_operations
->page_table_unlock
);
1668 as_operations
->page_table_unlock(as
, unlock
);
/** Test whether page tables are locked.
 *
 * @param as Address space where the page tables belong.
 *
 * @return True if the page tables belonging to the address space
 *         are locked, otherwise false.
 *
 */
NO_TRACE bool page_table_locked(as_t *as)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_locked);

    return as_operations->page_table_locked(as);
}
/** Return size of the address space area with given base.
 *
 * @param base Arbitrary address inside the address space area.
 *
 * @return Size of the address space area in bytes or zero if it
 *         does not exist.
 *
 */
size_t as_area_get_size(uintptr_t base)
{
    size_t size;

    page_table_lock(AS, true);
    as_area_t *src_area = find_area_and_lock(AS, base);

    if (src_area) {
        size = P2SZ(src_area->pages);
        mutex_unlock(&src_area->lock);
    } else
        size = 0;

    page_table_unlock(AS, true);
    return size;
}
/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param area  Address space area.
 * @param page  First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return False on failure or true on success.
 *
 */
bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
{
    ASSERT(mutex_locked(&area->lock));
    ASSERT(IS_ALIGNED(page, PAGE_SIZE));
    ASSERT(count);

    btree_node_t *leaf = NULL;
    size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
    if (pages) {
        /*
         * We hit the beginning of some used space.
         */
        return false;
    }

    ASSERT(leaf != NULL);

    if (!leaf->keys) {
        btree_insert(&area->used_space, page, (void *) count, leaf);
        goto success;
    }

    btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
    if (node) {
        uintptr_t left_pg = node->key[node->keys - 1];
        uintptr_t right_pg = leaf->key[0];
        size_t left_cnt = (size_t) node->value[node->keys - 1];
        size_t right_cnt = (size_t) leaf->value[0];

        /*
         * Examine the possibility that the interval fits
         * somewhere between the rightmost interval of
         * the left neighbour and the first interval of the leaf.
         */

        if (page >= right_pg) {
            /* Do nothing. */
        } else if (overlaps(page, P2SZ(count), left_pg,
            P2SZ(left_cnt))) {
            /* The interval intersects with the left interval. */
            return false;
        } else if (overlaps(page, P2SZ(count), right_pg,
            P2SZ(right_cnt))) {
            /* The interval intersects with the right interval. */
            return false;
        } else if ((page == left_pg + P2SZ(left_cnt)) &&
            (page + P2SZ(count) == right_pg)) {
            /*
             * The interval can be added by merging the two already
             * present intervals.
             */
            node->value[node->keys - 1] += count + right_cnt;
            btree_remove(&area->used_space, right_pg, leaf);
            goto success;
        } else if (page == left_pg + P2SZ(left_cnt)) {
            /*
             * The interval can be added by simply growing the left
             * interval.
             */
            node->value[node->keys - 1] += count;
            goto success;
        } else if (page + P2SZ(count) == right_pg) {
            /*
             * The interval can be added by simply moving base of
             * the right interval down and increasing its size
             * accordingly.
             */
            leaf->value[0] += count;
            leaf->key[0] = page;
            goto success;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
            goto success;
        }
    } else if (page < leaf->key[0]) {
        uintptr_t right_pg = leaf->key[0];
        size_t right_cnt = (size_t) leaf->value[0];

        /*
         * Investigate the border case in which the left neighbour does
         * not exist but the interval fits from the left.
         */

        if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) {
            /* The interval intersects with the right interval. */
            return false;
        } else if (page + P2SZ(count) == right_pg) {
            /*
             * The interval can be added by moving the base of the
             * right interval down and increasing its size
             * accordingly.
             */
            leaf->key[0] = page;
            leaf->value[0] += count;
            goto success;
        } else {
            /*
             * The interval doesn't adjoin with the right interval.
             * It must be added individually.
             */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
            goto success;
        }
    }

    node = btree_leaf_node_right_neighbour(&area->used_space, leaf);
    if (node) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        uintptr_t right_pg = node->key[0];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
        size_t right_cnt = (size_t) node->value[0];

        /*
         * Examine the possibility that the interval fits
         * somewhere between the leftmost interval of
         * the right neighbour and the last interval of the leaf.
         */

        if (page < left_pg) {
            /* Do nothing. */
        } else if (overlaps(page, P2SZ(count), left_pg,
            P2SZ(left_cnt))) {
            /* The interval intersects with the left interval. */
            return false;
        } else if (overlaps(page, P2SZ(count), right_pg,
            P2SZ(right_cnt))) {
            /* The interval intersects with the right interval. */
            return false;
        } else if ((page == left_pg + P2SZ(left_cnt)) &&
            (page + P2SZ(count) == right_pg)) {
            /*
             * The interval can be added by merging the two already
             * present intervals.
             */
            leaf->value[leaf->keys - 1] += count + right_cnt;
            btree_remove(&area->used_space, right_pg, node);
            goto success;
        } else if (page == left_pg + P2SZ(left_cnt)) {
            /*
             * The interval can be added by simply growing the left
             * interval.
             */
            leaf->value[leaf->keys - 1] += count;
            goto success;
        } else if (page + P2SZ(count) == right_pg) {
            /*
             * The interval can be added by simply moving base of
             * the right interval down and increasing its size
             * accordingly.
             */
            node->value[0] += count;
            node->key[0] = page;
            goto success;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
            goto success;
        }
    } else if (page >= leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];

        /*
         * Investigate the border case in which the right neighbour
         * does not exist but the interval fits from the right.
         */

        if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) {
            /* The interval intersects with the left interval. */
            return false;
        } else if (left_pg + P2SZ(left_cnt) == page) {
            /*
             * The interval can be added by growing the left
             * interval.
             */
            leaf->value[leaf->keys - 1] += count;
            goto success;
        } else {
            /*
             * The interval doesn't adjoin with the left interval.
             * It must be added individually.
             */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
            goto success;
        }
    }

    /*
     * Note that if the algorithm made it thus far, the interval can fit
     * only between two other intervals of the leaf. The two border cases
     * were already resolved.
     */
    btree_key_t i;
    for (i = 1; i < leaf->keys; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1];
            uintptr_t right_pg = leaf->key[i];
            size_t left_cnt = (size_t) leaf->value[i - 1];
            size_t right_cnt = (size_t) leaf->value[i];

            /*
             * The interval fits between left_pg and right_pg.
             */

            if (overlaps(page, P2SZ(count), left_pg,
                P2SZ(left_cnt))) {
                /*
                 * The interval intersects with the left
                 * interval.
                 */
                return false;
            } else if (overlaps(page, P2SZ(count), right_pg,
                P2SZ(right_cnt))) {
                /*
                 * The interval intersects with the right
                 * interval.
                 */
                return false;
            } else if ((page == left_pg + P2SZ(left_cnt)) &&
                (page + P2SZ(count) == right_pg)) {
                /*
                 * The interval can be added by merging the two
                 * already present intervals.
                 */
                leaf->value[i - 1] += count + right_cnt;
                btree_remove(&area->used_space, right_pg, leaf);
                goto success;
            } else if (page == left_pg + P2SZ(left_cnt)) {
                /*
                 * The interval can be added by simply growing
                 * the left interval.
                 */
                leaf->value[i - 1] += count;
                goto success;
            } else if (page + P2SZ(count) == right_pg) {
                /*
                 * The interval can be added by simply moving
                 * base of the right interval down and
                 * increasing its size accordingly.
                 */
                leaf->value[i] += count;
                leaf->key[i] = page;
                goto success;
            } else {
                /*
                 * The interval is between both neighbouring
                 * intervals, but cannot be merged with any of
                 * them.
                 */
                btree_insert(&area->used_space, page,
                    (void *) count, leaf);
                goto success;
            }
        }
    }

    panic("Inconsistency detected while adding %zu pages of used "
        "space at %p.", count, (void *) page);

success:
    area->resident += count;
    return true;
}
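
/*
 * Worked example of the merging logic above (page numbers are assumptions,
 * PAGE_SIZE == 4 KiB): with existing intervals [0x1000, 1 page] and
 * [0x4000, 1 page], used_space_insert(area, 0x3000, 1) adjoins only the
 * right interval, which becomes [0x3000, 2 pages]. Calling
 * used_space_insert(area, 0x2000, 2) instead bridges both intervals and
 * collapses them into a single [0x1000, 4 pages] record.
 */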
/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param area  Address space area.
 * @param page  First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return False on failure or true on success.
 *
 */
bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
{
    ASSERT(mutex_locked(&area->lock));
    ASSERT(IS_ALIGNED(page, PAGE_SIZE));
    ASSERT(count);

    btree_node_t *leaf;
    size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
    if (pages) {
        /*
         * We are lucky, page is the beginning of some interval.
         */
        if (count > pages) {
            return false;
        } else if (count == pages) {
            btree_remove(&area->used_space, page, leaf);
            goto success;
        } else {
            /*
             * Find the respective interval.
             * Decrease its size and relocate its start address.
             */
            btree_key_t i;
            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page) {
                    leaf->key[i] += P2SZ(count);
                    leaf->value[i] -= count;
                    goto success;
                }
            }

            goto error;
        }
    }

    btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space,
        leaf);
    if ((node) && (page < leaf->key[0])) {
        uintptr_t left_pg = node->key[node->keys - 1];
        size_t left_cnt = (size_t) node->value[node->keys - 1];

        if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
            if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
                /*
                 * The interval is contained in the rightmost
                 * interval of the left neighbour and can be
                 * removed by updating the size of the bigger
                 * interval.
                 */
                node->value[node->keys - 1] -= count;
                goto success;
            } else if (page + P2SZ(count) <
                left_pg + P2SZ(left_cnt)) {
                size_t new_cnt;

                /*
                 * The interval is contained in the rightmost
                 * interval of the left neighbour but its
                 * removal requires both updating the size of
                 * the original interval and also inserting a
                 * new interval.
                 */
                new_cnt = ((left_pg + P2SZ(left_cnt)) -
                    (page + P2SZ(count))) >> PAGE_WIDTH;
                node->value[node->keys - 1] -= count + new_cnt;
                btree_insert(&area->used_space, page +
                    P2SZ(count), (void *) new_cnt, leaf);
                goto success;
            }
        }

        return false;
    } else if (page < leaf->key[0])
        return false;

    if (page > leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];

        if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
            if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
                /*
                 * The interval is contained in the rightmost
                 * interval of the leaf and can be removed by
                 * updating the size of the bigger interval.
                 */
                leaf->value[leaf->keys - 1] -= count;
                goto success;
            } else if (page + P2SZ(count) < left_pg +
                P2SZ(left_cnt)) {
                size_t new_cnt;

                /*
                 * The interval is contained in the rightmost
                 * interval of the leaf but its removal
                 * requires both updating the size of the
                 * original interval and also inserting a new
                 * interval.
                 */
                new_cnt = ((left_pg + P2SZ(left_cnt)) -
                    (page + P2SZ(count))) >> PAGE_WIDTH;
                leaf->value[leaf->keys - 1] -= count + new_cnt;
                btree_insert(&area->used_space, page +
                    P2SZ(count), (void *) new_cnt, leaf);
                goto success;
            }
        }

        return false;
    }

    /*
     * The border cases have been already resolved.
     * Now the interval can be only between intervals of the leaf.
     */
    btree_key_t i;
    for (i = 1; i < leaf->keys - 1; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1];
            size_t left_cnt = (size_t) leaf->value[i - 1];

            /*
             * Now the interval is between intervals corresponding
             * to (i - 1) and i.
             */
            if (overlaps(left_pg, P2SZ(left_cnt), page,
                P2SZ(count))) {
                if (page + P2SZ(count) ==
                    left_pg + P2SZ(left_cnt)) {
                    /*
                     * The interval is contained in the
                     * interval (i - 1) of the leaf and can
                     * be removed by updating the size of
                     * the bigger interval.
                     */
                    leaf->value[i - 1] -= count;
                    goto success;
                } else if (page + P2SZ(count) <
                    left_pg + P2SZ(left_cnt)) {
                    size_t new_cnt;

                    /*
                     * The interval is contained in the
                     * interval (i - 1) of the leaf but its
                     * removal requires both updating the
                     * size of the original interval and
                     * also inserting a new interval.
                     */
                    new_cnt = ((left_pg + P2SZ(left_cnt)) -
                        (page + P2SZ(count))) >>
                        PAGE_WIDTH;
                    leaf->value[i - 1] -= count + new_cnt;
                    btree_insert(&area->used_space, page +
                        P2SZ(count), (void *) new_cnt,
                        leaf);
                    goto success;
                }
            }

            return false;
        }
    }

error:
    panic("Inconsistency detected while removing %zu pages of used "
        "space from %p.", count, (void *) page);

success:
    area->resident -= count;
    return true;
}
/*
 * Address space related syscalls.
 */

sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
    uintptr_t bound, int pager)
{
    uintptr_t virt = base;
    mem_backend_t *backend;
    mem_backend_data_t backend_data;

    if (pager == AS_AREA_UNPAGED)
        backend = &anon_backend;
    else {
        backend = &user_backend;
        backend_data.pager = pager;
    }

    as_area_t *area = as_area_create(AS, flags, size,
        AS_AREA_ATTR_NONE, backend, &backend_data, &virt, bound);
    if (area == NULL)
        return (sysarg_t) AS_MAP_FAILED;

    return (sysarg_t) virt;
}

sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
{
    return (sysarg_t) as_area_resize(AS, address, size, 0);
}

sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
{
    return (sysarg_t) as_area_change_flags(AS, flags, address);
}

sysarg_t sys_as_area_destroy(uintptr_t address)
{
    return (sysarg_t) as_area_destroy(AS, address);
}
2222 * @param as Address space.
2223 * @param obuf Place to save pointer to returned buffer.
2224 * @param osize Place to save size of returned buffer.
2227 void as_get_area_info(as_t
*as
, as_area_info_t
**obuf
, size_t *osize
)
2229 mutex_lock(&as
->lock
);
2231 /* First pass, count number of areas. */
2233 size_t area_cnt
= 0;
2235 list_foreach(as
->as_area_btree
.leaf_list
, leaf_link
, btree_node_t
,
2237 area_cnt
+= node
->keys
;
2240 size_t isize
= area_cnt
* sizeof(as_area_info_t
);
2241 as_area_info_t
*info
= malloc(isize
, 0);
2243 /* Second pass, record data. */
2245 size_t area_idx
= 0;
2247 list_foreach(as
->as_area_btree
.leaf_list
, leaf_link
, btree_node_t
,
2251 for (i
= 0; i
< node
->keys
; i
++) {
2252 as_area_t
*area
= node
->value
[i
];
2254 ASSERT(area_idx
< area_cnt
);
2255 mutex_lock(&area
->lock
);
2257 info
[area_idx
].start_addr
= area
->base
;
2258 info
[area_idx
].size
= P2SZ(area
->pages
);
2259 info
[area_idx
].flags
= area
->flags
;
2262 mutex_unlock(&area
->lock
);
2266 mutex_unlock(&as
->lock
);
/** Print out information about address space.
 *
 * @param as Address space.
 *
 */
void as_print(as_t *as)
{
    mutex_lock(&as->lock);

    /* Print out info about address space areas */
    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
        node) {
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            as_area_t *area = node->value[i];

            mutex_lock(&area->lock);
            printf("as_area: %p, base=%p, pages=%zu"
                " (%p - %p)\n", area, (void *) area->base,
                area->pages, (void *) area->base,
                (void *) (area->base + P2SZ(area->pages)));
            mutex_unlock(&area->lock);
        }
    }

    mutex_unlock(&as->lock);
}

/** @}
 */