// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>
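/*
 * All page-table changes are replayed on the host through a small ops
 * table: the kernel's own address space uses the kern_* wrappers below,
 * while userspace mms go through the skas map/unmap/protect primitives.
 * This keeps the page-table walkers backend-agnostic.
 */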
struct vm_ops {
	struct mm_id *mm_idp;

	int (*mmap)(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset);
	int (*unmap)(struct mm_id *mm_idp,
		     unsigned long virt, unsigned long len);
	int (*mprotect)(struct mm_id *mm_idp,
			unsigned long virt, unsigned long len,
			unsigned int prot);
};
static int kern_map(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset)
{
	/* TODO: Why is executable needed to be always set in the kernel? */
	return os_map_memory((void *)virt, phys_fd, offset, len,
			     prot & UM_PROT_READ, prot & UM_PROT_WRITE,
			     1);
}
static int kern_unmap(struct mm_id *mm_idp,
		      unsigned long virt, unsigned long len)
{
	return os_unmap_memory((void *)virt, len);
}
static int kern_mprotect(struct mm_id *mm_idp,
			 unsigned long virt, unsigned long len,
			 unsigned int prot)
{
	return os_protect_memory((void *)virt, len,
				 prot & UM_PROT_READ, prot & UM_PROT_WRITE,
				 1);
}
void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
	       "This can happen due to a memory limitation or because "
	       "vm.max_map_count has been reached.\n");
}
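/*
 * Leaf level of the walk: for each PTE in [addr, end), push the pending
 * change to the host (map a new page, unmap a stale one, or just adjust
 * protections) and mark the PTE up to date.
 */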
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		/* Derive host protections from the PTE's accessed/dirty state. */
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (pte_newpage(*pte)) {
			if (pte_present(*pte)) {
				if (pte_newpage(*pte)) {
					__u64 offset;
					unsigned long phys =
						pte_val(*pte) & PAGE_MASK;
					int fd = phys_mapping(phys, &offset);

					ret = ops->mmap(ops->mm_idp, addr,
							PAGE_SIZE, prot, fd,
							offset);
				}
			} else
				ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);
		} else if (pte_newprot(*pte))
			ret = ops->mprotect(ops->mm_idp, addr, PAGE_SIZE, prot);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}
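/*
 * The pmd/pud/p4d levels below all follow the same pattern: an entry
 * that is absent but marked changed has its whole range unmapped on the
 * host; a present entry recurses one level further down.
 */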
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (pmd_newpage(*pmd)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pmd_mkuptodate(*pmd);
			}
		} else
			ret = update_pte_range(pmd, addr, next, ops);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}
static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (pud_newpage(*pud)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pud_mkuptodate(*pud);
			}
		} else
			ret = update_pmd_range(pud, addr, next, ops);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}
static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	p4d_t *p4d;
	unsigned long next;
	int ret = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (!p4d_present(*p4d)) {
			if (p4d_newpage(*p4d)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				p4d_mkuptodate(*p4d);
			}
		} else
			ret = update_pud_range(p4d, addr, next, ops);
	} while (p4d++, addr = next, ((addr < end) && !ret));
	return ret;
}
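/*
 * Replay all changes recorded in mm->context.sync_tlb_range_{from,to}
 * on the host, then reset the range so it can accumulate new updates.
 */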
int um_tlb_sync(struct mm_struct *mm)
{
	pgd_t *pgd;
	struct vm_ops ops;
	unsigned long addr = mm->context.sync_tlb_range_from, next;
	int ret = 0;

	if (mm->context.sync_tlb_range_to == 0)
		return 0;

	ops.mm_idp = &mm->context.id;
	if (mm == &init_mm) {
		ops.mmap = kern_map;
		ops.unmap = kern_unmap;
		ops.mprotect = kern_mprotect;
	} else {
		ops.mmap = map;
		ops.unmap = unmap;
		ops.mprotect = protect;
	}

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
		if (!pgd_present(*pgd)) {
			if (pgd_newpage(*pgd)) {
				ret = ops.unmap(ops.mm_idp, addr,
						next - addr);
				pgd_mkuptodate(*pgd);
			}
		} else
			ret = update_p4d_range(pgd, addr, next, &ops);
	} while (pgd++, addr = next,
		 ((addr < mm->context.sync_tlb_range_to) && !ret));

	if (ret == -ENOMEM)
		report_enomem();

	mm->context.sync_tlb_range_from = 0;
	mm->context.sync_tlb_range_to = 0;

	return ret;
}
void flush_tlb_all(void)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&current->mm->mm_users) == 0)
		return;

	flush_tlb_mm(current->mm);
}
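/*
 * Flushing an mm only records each VMA's range as needing a sync; the
 * host-side work is deferred to the next um_tlb_sync() call.
 */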
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
}