arch/x86/xen/mmu.c
1 /*
2 * Xen mmu operations
4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
16 * use.
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
29 * pagetable.
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
41 #include <linux/sched.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/vmalloc.h>
46 #include <linux/module.h>
47 #include <linux/gfp.h>
48 #include <linux/memblock.h>
49 #include <linux/seq_file.h>
51 #include <trace/events/xen.h>
53 #include <asm/pgtable.h>
54 #include <asm/tlbflush.h>
55 #include <asm/fixmap.h>
56 #include <asm/mmu_context.h>
57 #include <asm/setup.h>
58 #include <asm/paravirt.h>
59 #include <asm/e820.h>
60 #include <asm/linkage.h>
61 #include <asm/page.h>
62 #include <asm/init.h>
63 #include <asm/pat.h>
64 #include <asm/smp.h>
66 #include <asm/xen/hypercall.h>
67 #include <asm/xen/hypervisor.h>
69 #include <xen/xen.h>
70 #include <xen/page.h>
71 #include <xen/interface/xen.h>
72 #include <xen/interface/hvm/hvm_op.h>
73 #include <xen/interface/version.h>
74 #include <xen/interface/memory.h>
75 #include <xen/hvc-console.h>
77 #include "multicalls.h"
78 #include "mmu.h"
79 #include "debugfs.h"
82 * Protects atomic reservation decrease/increase against concurrent increases.
83 * Also protects non-atomic updates of current_pages and balloon lists.
85 DEFINE_SPINLOCK(xen_reservation_lock);
88 * Identity map, in addition to plain kernel map. This needs to be
89 * large enough to allocate the page table pages needed to map the rest.
90 * Each page can map 2MB.
92 #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
93 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
95 #ifdef CONFIG_X86_64
96 /* l3 pud for userspace vsyscall mapping */
97 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
98 #endif /* CONFIG_X86_64 */
101 * Note about cr3 (pagetable base) values:
103 * xen_cr3 contains the current logical cr3 value; it contains the
104 * last set cr3. This may not be the current effective cr3, because
105 * its update may be being lazily deferred. However, a vcpu looking
106 * at its own cr3 can use this value knowing that everything will
107 * be self-consistent.
109 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
110 * hypercall to set the vcpu cr3 is complete (so it may be a little
111 * out of date, but it will never be set early). If one vcpu is
112 * looking at another vcpu's cr3 value, it should use this variable.
114 DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
115 DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
119 * Just beyond the highest usermode address. STACK_TOP_MAX has a
120 * redzone above it, so round it up to a PGD boundary.
122 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
124 unsigned long arbitrary_virt_to_mfn(void *vaddr)
126 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
128 return PFN_DOWN(maddr.maddr);
131 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
133 unsigned long address = (unsigned long)vaddr;
134 unsigned int level;
135 pte_t *pte;
136 unsigned offset;
139 * if the PFN is in the linear mapped vaddr range, we can just use
140 * the (quick) virt_to_machine() p2m lookup
142 if (virt_addr_valid(vaddr))
143 return virt_to_machine(vaddr);
145 /* otherwise we have to do a (slower) full page-table walk */
147 pte = lookup_address(address, &level);
148 BUG_ON(pte == NULL);
149 offset = address & ~PAGE_MASK;
150 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
152 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
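/*
 * Force a single low-memory page read-only (or read-write again) in
 * place, using a direct update_va_mapping hypercall rather than the
 * normal pte accessors.
 */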
154 void make_lowmem_page_readonly(void *vaddr)
156 pte_t *pte, ptev;
157 unsigned long address = (unsigned long)vaddr;
158 unsigned int level;
160 pte = lookup_address(address, &level);
161 if (pte == NULL)
162 return; /* vaddr missing */
164 ptev = pte_wrprotect(*pte);
166 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
167 BUG();
170 void make_lowmem_page_readwrite(void *vaddr)
172 pte_t *pte, ptev;
173 unsigned long address = (unsigned long)vaddr;
174 unsigned int level;
176 pte = lookup_address(address, &level);
177 if (pte == NULL)
178 return; /* vaddr missing */
180 ptev = pte_mkwrite(*pte);
182 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
183 BUG();
187 static bool xen_page_pinned(void *ptr)
189 struct page *page = virt_to_page(ptr);
191 return PagePinned(page);
194 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
196 struct multicall_space mcs;
197 struct mmu_update *u;
199 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
201 mcs = xen_mc_entry(sizeof(*u));
202 u = mcs.args;
204 /* ptep might be kmapped when using 32-bit HIGHPTE */
205 u->ptr = virt_to_machine(ptep).maddr;
206 u->val = pte_val_ma(pteval);
208 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
210 xen_mc_issue(PARAVIRT_LAZY_MMU);
212 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
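/*
 * Append an mmu_update request to the multicall batch currently being
 * built: if the most recently queued multicall is already an
 * mmu_update hypercall its argument count is simply bumped, otherwise
 * a new MULTI_mmu_update entry is started.
 */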
214 static void xen_extend_mmu_update(const struct mmu_update *update)
216 struct multicall_space mcs;
217 struct mmu_update *u;
219 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
221 if (mcs.mc != NULL) {
222 mcs.mc->args[1]++;
223 } else {
224 mcs = __xen_mc_entry(sizeof(*u));
225 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
228 u = mcs.args;
229 *u = *update;
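/* As xen_extend_mmu_update(), but for mmuext_op requests. */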
232 static void xen_extend_mmuext_op(const struct mmuext_op *op)
234 struct multicall_space mcs;
235 struct mmuext_op *u;
237 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
239 if (mcs.mc != NULL) {
240 mcs.mc->args[1]++;
241 } else {
242 mcs = __xen_mc_entry(sizeof(*u));
243 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
246 u = mcs.args;
247 *u = *op;
250 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
252 struct mmu_update u;
254 preempt_disable();
256 xen_mc_batch();
258 /* ptr may be ioremapped for 64-bit pagetable setup */
259 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
260 u.val = pmd_val_ma(val);
261 xen_extend_mmu_update(&u);
263 xen_mc_issue(PARAVIRT_LAZY_MMU);
265 preempt_enable();
268 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
270 trace_xen_mmu_set_pmd(ptr, val);
272 /* If page is not pinned, we can just update the entry
273 directly */
274 if (!xen_page_pinned(ptr)) {
275 *ptr = val;
276 return;
279 xen_set_pmd_hyper(ptr, val);
283 * Associate a virtual page frame with a given physical page frame
284 * and protection flags for that frame.
286 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
288 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
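/*
 * Try to fold a pte write into the current lazy-MMU multicall batch.
 * Returns false when we are not in lazy MMU mode, in which case the
 * caller falls back to a plain native pte write.
 */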
291 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
293 struct mmu_update u;
295 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
296 return false;
298 xen_mc_batch();
300 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
301 u.val = pte_val_ma(pteval);
302 xen_extend_mmu_update(&u);
304 xen_mc_issue(PARAVIRT_LAZY_MMU);
306 return true;
309 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
311 if (!xen_batched_set_pte(ptep, pteval))
312 native_set_pte(ptep, pteval);
315 static void xen_set_pte(pte_t *ptep, pte_t pteval)
317 trace_xen_mmu_set_pte(ptep, pteval);
318 __xen_set_pte(ptep, pteval);
321 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
322 pte_t *ptep, pte_t pteval)
324 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
325 __xen_set_pte(ptep, pteval);
328 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
329 unsigned long addr, pte_t *ptep)
331 /* Just return the pte as-is. We preserve the bits on commit */
332 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
333 return *ptep;
336 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
337 pte_t *ptep, pte_t pte)
339 struct mmu_update u;
341 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
342 xen_mc_batch();
344 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
345 u.val = pte_val_ma(pte);
346 xen_extend_mmu_update(&u);
348 xen_mc_issue(PARAVIRT_LAZY_MMU);
351 /* Assume pteval_t is equivalent to all the other *val_t types. */
352 static pteval_t pte_mfn_to_pfn(pteval_t val)
354 if (val & _PAGE_PRESENT) {
355 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
356 pteval_t flags = val & PTE_FLAGS_MASK;
357 val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
360 return val;
363 static pteval_t pte_pfn_to_mfn(pteval_t val)
365 if (val & _PAGE_PRESENT) {
366 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
367 pteval_t flags = val & PTE_FLAGS_MASK;
368 unsigned long mfn;
370 if (!xen_feature(XENFEAT_auto_translated_physmap))
371 mfn = get_phys_to_machine(pfn);
372 else
373 mfn = pfn;
375 * If there's no mfn for the pfn, then just create an
376 * empty non-present pte. Unfortunately this loses
377 * information about the original pfn, so
378 * pte_mfn_to_pfn is asymmetric.
380 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
381 mfn = 0;
382 flags = 0;
383 } else {
385 * It is essential to do this test _after_ the
386 * INVALID_P2M_ENTRY check, because INVALID_P2M_ENTRY &
387 * IDENTITY_FRAME_BIT resolves to true.
389 mfn &= ~FOREIGN_FRAME_BIT;
390 if (mfn & IDENTITY_FRAME_BIT) {
391 mfn &= ~IDENTITY_FRAME_BIT;
392 flags |= _PAGE_IOMAP;
395 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
398 return val;
401 static pteval_t iomap_pte(pteval_t val)
403 if (val & _PAGE_PRESENT) {
404 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
405 pteval_t flags = val & PTE_FLAGS_MASK;
407 /* We assume the pte frame number is an MFN, so
408 just use it as-is. */
409 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
412 return val;
415 static pteval_t xen_pte_val(pte_t pte)
417 pteval_t pteval = pte.pte;
419 /* If this is a WC pte, convert back from Xen WC to Linux WC */
420 if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
421 WARN_ON(!pat_enabled);
422 pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
425 if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
426 return pteval;
428 return pte_mfn_to_pfn(pteval);
430 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
432 static pgdval_t xen_pgd_val(pgd_t pgd)
434 return pte_mfn_to_pfn(pgd.pgd);
436 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
439 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
440 * are reserved for now, to correspond to the Intel-reserved PAT
441 * types.
443 * We expect Linux's PAT set as follows:
445 * Idx PTE flags Linux Xen Default
446 * 0 WB WB WB
447 * 1 PWT WC WT WT
448 * 2 PCD UC- UC- UC-
449 * 3 PCD PWT UC UC UC
450 * 4 PAT WB WC WB
451 * 5 PAT PWT WC WP WT
452 * 6 PAT PCD UC- UC UC-
453 * 7 PAT PCD PWT UC UC UC
456 void xen_set_pat(u64 pat)
458 /* We expect Linux to use a PAT setting of
459 * UC UC- WC WB (ignoring the PAT flag) */
460 WARN_ON(pat != 0x0007010600070106ull);
463 static pte_t xen_make_pte(pteval_t pte)
465 phys_addr_t addr = (pte & PTE_PFN_MASK);
467 /* If Linux is trying to set a WC pte, then map to the Xen WC.
468 * If _PAGE_PAT is set, then it probably means it is really
469 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
470 * things work out OK...
472 * (We should never see kernel mappings with _PAGE_PSE set,
473 * but we could see hugetlbfs mappings, I think).
475 if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
476 if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
477 pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
481 * Unprivileged domains are allowed to do IOMAPpings for
482 * PCI passthrough, but not map ISA space. The ISA
483 * mappings are just dummy local mappings to keep other
484 * parts of the kernel happy.
486 if (unlikely(pte & _PAGE_IOMAP) &&
487 (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
488 pte = iomap_pte(pte);
489 } else {
490 pte &= ~_PAGE_IOMAP;
491 pte = pte_pfn_to_mfn(pte);
494 return native_make_pte(pte);
496 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
498 #ifdef CONFIG_XEN_DEBUG
499 pte_t xen_make_pte_debug(pteval_t pte)
501 phys_addr_t addr = (pte & PTE_PFN_MASK);
502 phys_addr_t other_addr;
503 bool io_page = false;
504 pte_t _pte;
506 if (pte & _PAGE_IOMAP)
507 io_page = true;
509 _pte = xen_make_pte(pte);
511 if (!addr)
512 return _pte;
514 if (io_page &&
515 (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
516 other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
517 WARN_ONCE(addr != other_addr,
518 "0x%lx is using VM_IO, but it is 0x%lx!\n",
519 (unsigned long)addr, (unsigned long)other_addr);
520 } else {
521 pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
522 other_addr = (_pte.pte & PTE_PFN_MASK);
523 WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
524 "0x%lx is missing VM_IO (and wasn't fixed)!\n",
525 (unsigned long)addr);
528 return _pte;
530 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
531 #endif
533 static pgd_t xen_make_pgd(pgdval_t pgd)
535 pgd = pte_pfn_to_mfn(pgd);
536 return native_make_pgd(pgd);
538 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
540 static pmdval_t xen_pmd_val(pmd_t pmd)
542 return pte_mfn_to_pfn(pmd.pmd);
544 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
546 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
548 struct mmu_update u;
550 preempt_disable();
552 xen_mc_batch();
554 /* ptr may be ioremapped for 64-bit pagetable setup */
555 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
556 u.val = pud_val_ma(val);
557 xen_extend_mmu_update(&u);
559 xen_mc_issue(PARAVIRT_LAZY_MMU);
561 preempt_enable();
564 static void xen_set_pud(pud_t *ptr, pud_t val)
566 trace_xen_mmu_set_pud(ptr, val);
568 /* If page is not pinned, we can just update the entry
569 directly */
570 if (!xen_page_pinned(ptr)) {
571 *ptr = val;
572 return;
575 xen_set_pud_hyper(ptr, val);
578 #ifdef CONFIG_X86_PAE
579 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
581 trace_xen_mmu_set_pte_atomic(ptep, pte);
582 set_64bit((u64 *)ptep, native_pte_val(pte));
585 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
587 trace_xen_mmu_pte_clear(mm, addr, ptep);
588 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
589 native_pte_clear(mm, addr, ptep);
592 static void xen_pmd_clear(pmd_t *pmdp)
594 trace_xen_mmu_pmd_clear(pmdp);
595 set_pmd(pmdp, __pmd(0));
597 #endif /* CONFIG_X86_PAE */
599 static pmd_t xen_make_pmd(pmdval_t pmd)
601 pmd = pte_pfn_to_mfn(pmd);
602 return native_make_pmd(pmd);
604 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
606 #if PAGETABLE_LEVELS == 4
607 static pudval_t xen_pud_val(pud_t pud)
609 return pte_mfn_to_pfn(pud.pud);
611 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
613 static pud_t xen_make_pud(pudval_t pud)
615 pud = pte_pfn_to_mfn(pud);
617 return native_make_pud(pud);
619 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
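/*
 * On 64-bit, a kernel pgd page may have a companion "user" pgd
 * (stashed in page->private) holding the usermode half of the address
 * space.  Return the matching slot in that user pgd, or NULL if there
 * is no user pgd or the slot is beyond USER_LIMIT.
 */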
621 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
623 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
624 unsigned offset = pgd - pgd_page;
625 pgd_t *user_ptr = NULL;
627 if (offset < pgd_index(USER_LIMIT)) {
628 struct page *page = virt_to_page(pgd_page);
629 user_ptr = (pgd_t *)page->private;
630 if (user_ptr)
631 user_ptr += offset;
634 return user_ptr;
637 static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
639 struct mmu_update u;
641 u.ptr = virt_to_machine(ptr).maddr;
642 u.val = pgd_val_ma(val);
643 xen_extend_mmu_update(&u);
647 * Raw hypercall-based set_pgd, intended for use in early boot before
648 * there's a page structure. This implies:
649 * 1. The only existing pagetable is the kernel's
650 * 2. It is always pinned
651 * 3. It has no user pagetable attached to it
653 static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
655 preempt_disable();
657 xen_mc_batch();
659 __xen_set_pgd_hyper(ptr, val);
661 xen_mc_issue(PARAVIRT_LAZY_MMU);
663 preempt_enable();
666 static void xen_set_pgd(pgd_t *ptr, pgd_t val)
668 pgd_t *user_ptr = xen_get_user_pgd(ptr);
670 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
672 /* If page is not pinned, we can just update the entry
673 directly */
674 if (!xen_page_pinned(ptr)) {
675 *ptr = val;
676 if (user_ptr) {
677 WARN_ON(xen_page_pinned(user_ptr));
678 *user_ptr = val;
680 return;
683 /* If it's pinned, then we can at least batch the kernel and
684 user updates together. */
685 xen_mc_batch();
687 __xen_set_pgd_hyper(ptr, val);
688 if (user_ptr)
689 __xen_set_pgd_hyper(user_ptr, val);
691 xen_mc_issue(PARAVIRT_LAZY_MMU);
693 #endif /* PAGETABLE_LEVELS == 4 */
696 * (Yet another) pagetable walker. This one is intended for pinning a
697 * pagetable. This means that it walks a pagetable and calls the
698 * callback function on each page it finds making up the page table,
699 * at every level. It walks the entire pagetable, but it only bothers
700 * pinning pte pages which are below limit. In the normal case this
701 * will be STACK_TOP_MAX, but at boot we need to pin up to
702 * FIXADDR_TOP.
704 * For 32-bit the important bit is that we don't pin beyond there,
705 * because then we start getting into Xen's ptes.
707 * For 64-bit, we must skip the Xen hole in the middle of the address
708 * space, just after the big x86-64 virtual hole.
710 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
711 int (*func)(struct mm_struct *mm, struct page *,
712 enum pt_level),
713 unsigned long limit)
715 int flush = 0;
716 unsigned hole_low, hole_high;
717 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
718 unsigned pgdidx, pudidx, pmdidx;
720 /* The limit is the last byte to be touched */
721 limit--;
722 BUG_ON(limit >= FIXADDR_TOP);
724 if (xen_feature(XENFEAT_auto_translated_physmap))
725 return 0;
728 * 64-bit has a great big hole in the middle of the address
729 * space, which contains the Xen mappings. On 32-bit these
730 * will end up making a zero-sized hole and so is a no-op.
732 hole_low = pgd_index(USER_LIMIT);
733 hole_high = pgd_index(PAGE_OFFSET);
735 pgdidx_limit = pgd_index(limit);
736 #if PTRS_PER_PUD > 1
737 pudidx_limit = pud_index(limit);
738 #else
739 pudidx_limit = 0;
740 #endif
741 #if PTRS_PER_PMD > 1
742 pmdidx_limit = pmd_index(limit);
743 #else
744 pmdidx_limit = 0;
745 #endif
747 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
748 pud_t *pud;
750 if (pgdidx >= hole_low && pgdidx < hole_high)
751 continue;
753 if (!pgd_val(pgd[pgdidx]))
754 continue;
756 pud = pud_offset(&pgd[pgdidx], 0);
758 if (PTRS_PER_PUD > 1) /* not folded */
759 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
761 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
762 pmd_t *pmd;
764 if (pgdidx == pgdidx_limit &&
765 pudidx > pudidx_limit)
766 goto out;
768 if (pud_none(pud[pudidx]))
769 continue;
771 pmd = pmd_offset(&pud[pudidx], 0);
773 if (PTRS_PER_PMD > 1) /* not folded */
774 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
776 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
777 struct page *pte;
779 if (pgdidx == pgdidx_limit &&
780 pudidx == pudidx_limit &&
781 pmdidx > pmdidx_limit)
782 goto out;
784 if (pmd_none(pmd[pmdidx]))
785 continue;
787 pte = pmd_page(pmd[pmdidx]);
788 flush |= (*func)(mm, pte, PT_PTE);
793 out:
794 /* Do the top level last, so that the callbacks can use it as
795 a cue to do final things like tlb flushes. */
796 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
798 return flush;
801 static int xen_pgd_walk(struct mm_struct *mm,
802 int (*func)(struct mm_struct *mm, struct page *,
803 enum pt_level),
804 unsigned long limit)
806 return __xen_pgd_walk(mm, mm->pgd, func, limit);
809 /* If we're using split pte locks, then take the page's lock and
810 return a pointer to it. Otherwise return NULL. */
811 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
813 spinlock_t *ptl = NULL;
815 #if USE_SPLIT_PTLOCKS
816 ptl = __pte_lockptr(page);
817 spin_lock_nest_lock(ptl, &mm->page_table_lock);
818 #endif
820 return ptl;
823 static void xen_pte_unlock(void *v)
825 spinlock_t *ptl = v;
826 spin_unlock(ptl);
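/* Queue a single pin/unpin mmuext op for @pfn on the current batch. */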
829 static void xen_do_pin(unsigned level, unsigned long pfn)
831 struct mmuext_op op;
833 op.cmd = level;
834 op.arg1.mfn = pfn_to_mfn(pfn);
836 xen_extend_mmuext_op(&op);
839 static int xen_pin_page(struct mm_struct *mm, struct page *page,
840 enum pt_level level)
842 unsigned pgfl = TestSetPagePinned(page);
843 int flush;
845 if (pgfl)
846 flush = 0; /* already pinned */
847 else if (PageHighMem(page))
848 /* kmaps need flushing if we found an unpinned
849 highpage */
850 flush = 1;
851 else {
852 void *pt = lowmem_page_address(page);
853 unsigned long pfn = page_to_pfn(page);
854 struct multicall_space mcs = __xen_mc_entry(0);
855 spinlock_t *ptl;
857 flush = 0;
860 * We need to hold the pagetable lock between the time
861 * we make the pagetable RO and when we actually pin
862 * it. If we don't, then other users may come in and
863 * attempt to update the pagetable by writing it,
864 * which will fail because the memory is RO but not
865 * pinned, so Xen won't do the trap'n'emulate.
867 * If we're using split pte locks, we can't hold the
868 * entire pagetable's worth of locks during the
869 * traverse, because we may wrap the preempt count (8
870 * bits). The solution is to mark RO and pin each PTE
871 * page while holding the lock. This means the number
872 * of locks we end up holding is never more than a
873 * batch size (~32 entries, at present).
875 * If we're not using split pte locks, we needn't pin
876 * the PTE pages independently, because we're
877 * protected by the overall pagetable lock.
879 ptl = NULL;
880 if (level == PT_PTE)
881 ptl = xen_pte_lock(page, mm);
883 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
884 pfn_pte(pfn, PAGE_KERNEL_RO),
885 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
887 if (ptl) {
888 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
890 /* Queue a deferred unlock for when this batch
891 is completed. */
892 xen_mc_callback(xen_pte_unlock, ptl);
896 return flush;
899 /* This is called just after a mm has been created, but it has not
900 been used yet. We need to make sure that its pagetable is all
901 read-only, and can be pinned. */
902 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
904 trace_xen_mmu_pgd_pin(mm, pgd);
906 xen_mc_batch();
908 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
909 /* re-enable interrupts for flushing */
910 xen_mc_issue(0);
912 kmap_flush_unused();
914 xen_mc_batch();
917 #ifdef CONFIG_X86_64
919 pgd_t *user_pgd = xen_get_user_pgd(pgd);
921 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
923 if (user_pgd) {
924 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
925 xen_do_pin(MMUEXT_PIN_L4_TABLE,
926 PFN_DOWN(__pa(user_pgd)));
929 #else /* CONFIG_X86_32 */
930 #ifdef CONFIG_X86_PAE
931 /* Need to make sure unshared kernel PMD is pinnable */
932 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
933 PT_PMD);
934 #endif
935 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
936 #endif /* CONFIG_X86_64 */
937 xen_mc_issue(0);
940 static void xen_pgd_pin(struct mm_struct *mm)
942 __xen_pgd_pin(mm, mm->pgd);
946 * On save, we need to pin all pagetables to make sure they get their
947 * mfns turned into pfns. Search the list for any unpinned pgds and pin
948 * them (unpinned pgds are not currently in use, probably because the
949 * process is under construction or destruction).
951 * Expected to be called in stop_machine() ("equivalent to taking
952 * every spinlock in the system"), so the locking doesn't really
953 * matter all that much.
955 void xen_mm_pin_all(void)
957 struct page *page;
959 spin_lock(&pgd_lock);
961 list_for_each_entry(page, &pgd_list, lru) {
962 if (!PagePinned(page)) {
963 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
964 SetPageSavePinned(page);
968 spin_unlock(&pgd_lock);
972 * The init_mm pagetable is really pinned as soon as it's created, but
973 * that's before we have page structures to store the bits. So do all
974 * the book-keeping now.
976 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
977 enum pt_level level)
979 SetPagePinned(page);
980 return 0;
983 static void __init xen_mark_init_mm_pinned(void)
985 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
988 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
989 enum pt_level level)
991 unsigned pgfl = TestClearPagePinned(page);
993 if (pgfl && !PageHighMem(page)) {
994 void *pt = lowmem_page_address(page);
995 unsigned long pfn = page_to_pfn(page);
996 spinlock_t *ptl = NULL;
997 struct multicall_space mcs;
1000 * Do the converse to pin_page. If we're using split
1001 * pte locks, we must be holding the lock while
1002 * the pte page is unpinned but still RO to prevent
1003 * concurrent updates from seeing it in this
1004 * partially-pinned state.
1006 if (level == PT_PTE) {
1007 ptl = xen_pte_lock(page, mm);
1009 if (ptl)
1010 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
1013 mcs = __xen_mc_entry(0);
1015 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
1016 pfn_pte(pfn, PAGE_KERNEL),
1017 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
1019 if (ptl) {
1020 /* unlock when batch completed */
1021 xen_mc_callback(xen_pte_unlock, ptl);
1025 return 0; /* never need to flush on unpin */
1028 /* Release a pagetable's pages back as normal RW */
1029 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
1031 trace_xen_mmu_pgd_unpin(mm, pgd);
1033 xen_mc_batch();
1035 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1037 #ifdef CONFIG_X86_64
1039 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1041 if (user_pgd) {
1042 xen_do_pin(MMUEXT_UNPIN_TABLE,
1043 PFN_DOWN(__pa(user_pgd)));
1044 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
1047 #endif
1049 #ifdef CONFIG_X86_PAE
1050 /* Need to make sure unshared kernel PMD is unpinned */
1051 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
1052 PT_PMD);
1053 #endif
1055 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
1057 xen_mc_issue(0);
1060 static void xen_pgd_unpin(struct mm_struct *mm)
1062 __xen_pgd_unpin(mm, mm->pgd);
1066 * On resume, undo any pinning done at save, so that the rest of the
1067 * kernel doesn't see any unexpected pinned pagetables.
1069 void xen_mm_unpin_all(void)
1071 struct page *page;
1073 spin_lock(&pgd_lock);
1075 list_for_each_entry(page, &pgd_list, lru) {
1076 if (PageSavePinned(page)) {
1077 BUG_ON(!PagePinned(page));
1078 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
1079 ClearPageSavePinned(page);
1083 spin_unlock(&pgd_lock);
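/*
 * A process pagetable is pinned when its mm first becomes active
 * (activate_mm) or when it is copied at fork time (dup_mmap), in both
 * cases under the mm's page_table_lock.
 */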
1086 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1088 spin_lock(&next->page_table_lock);
1089 xen_pgd_pin(next);
1090 spin_unlock(&next->page_table_lock);
1093 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1095 spin_lock(&mm->page_table_lock);
1096 xen_pgd_pin(mm);
1097 spin_unlock(&mm->page_table_lock);
1101 #ifdef CONFIG_SMP
1102 /* Another cpu may still have its %cr3 pointing at the pagetable, so
1103 we need to repoint it somewhere else before we can unpin it. */
1104 static void drop_other_mm_ref(void *info)
1106 struct mm_struct *mm = info;
1107 struct mm_struct *active_mm;
1109 active_mm = percpu_read(cpu_tlbstate.active_mm);
1111 if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
1112 leave_mm(smp_processor_id());
1114 /* If this cpu still has a stale cr3 reference, then make sure
1115 it has been flushed. */
1116 if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
1117 load_cr3(swapper_pg_dir);
1120 static void xen_drop_mm_ref(struct mm_struct *mm)
1122 cpumask_var_t mask;
1123 unsigned cpu;
1125 if (current->active_mm == mm) {
1126 if (current->mm == mm)
1127 load_cr3(swapper_pg_dir);
1128 else
1129 leave_mm(smp_processor_id());
1132 /* Get the "official" set of cpus referring to our pagetable. */
1133 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1134 for_each_online_cpu(cpu) {
1135 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
1136 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1137 continue;
1138 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1140 return;
1142 cpumask_copy(mask, mm_cpumask(mm));
1144 /* It's possible that a vcpu may have a stale reference to our
1145 cr3, because it's in lazy mode and hasn't yet flushed
1146 its set of pending hypercalls. In this case, we can
1147 look at its actual current cr3 value, and force it to flush
1148 if needed. */
1149 for_each_online_cpu(cpu) {
1150 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1151 cpumask_set_cpu(cpu, mask);
1154 if (!cpumask_empty(mask))
1155 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1156 free_cpumask_var(mask);
1158 #else
1159 static void xen_drop_mm_ref(struct mm_struct *mm)
1161 if (current->active_mm == mm)
1162 load_cr3(swapper_pg_dir);
1164 #endif
1167 * While a process runs, Xen pins its pagetables, which means that the
1168 * hypervisor forces it to be read-only, and it controls all updates
1169 * to it. This means that all pagetable updates have to go via the
1170 * hypervisor, which is moderately expensive.
1172 * Since we're pulling the pagetable down, we switch to use init_mm,
1173 * unpin old process pagetable and mark it all read-write, which
1174 * allows further operations on it to be simple memory accesses.
1176 * The only subtle point is that another CPU may still be using the
1177 * pagetable because of lazy tlb flushing. This means we need to
1178 * switch all CPUs off this pagetable before we can unpin it.
1180 static void xen_exit_mmap(struct mm_struct *mm)
1182 get_cpu(); /* make sure we don't move around */
1183 xen_drop_mm_ref(mm);
1184 put_cpu();
1186 spin_lock(&mm->page_table_lock);
1188 /* pgd may not be pinned in the error exit path of execve */
1189 if (xen_page_pinned(mm->pgd))
1190 xen_pgd_unpin(mm);
1192 spin_unlock(&mm->page_table_lock);
1195 static void __init xen_pagetable_setup_start(pgd_t *base)
1199 static void xen_post_allocator_init(void);
1201 static void __init xen_pagetable_setup_done(pgd_t *base)
1203 xen_setup_shared_info();
1204 xen_post_allocator_init();
1207 static void xen_write_cr2(unsigned long cr2)
1209 percpu_read(xen_vcpu)->arch.cr2 = cr2;
1212 static unsigned long xen_read_cr2(void)
1214 return percpu_read(xen_vcpu)->arch.cr2;
1217 unsigned long xen_read_cr2_direct(void)
1219 return percpu_read(xen_vcpu_info.arch.cr2);
1222 static void xen_flush_tlb(void)
1224 struct mmuext_op *op;
1225 struct multicall_space mcs;
1227 trace_xen_mmu_flush_tlb(0);
1229 preempt_disable();
1231 mcs = xen_mc_entry(sizeof(*op));
1233 op = mcs.args;
1234 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1235 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1237 xen_mc_issue(PARAVIRT_LAZY_MMU);
1239 preempt_enable();
1242 static void xen_flush_tlb_single(unsigned long addr)
1244 struct mmuext_op *op;
1245 struct multicall_space mcs;
1247 trace_xen_mmu_flush_tlb_single(addr);
1249 preempt_disable();
1251 mcs = xen_mc_entry(sizeof(*op));
1252 op = mcs.args;
1253 op->cmd = MMUEXT_INVLPG_LOCAL;
1254 op->arg1.linear_addr = addr & PAGE_MASK;
1255 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1257 xen_mc_issue(PARAVIRT_LAZY_MMU);
1259 preempt_enable();
1262 static void xen_flush_tlb_others(const struct cpumask *cpus,
1263 struct mm_struct *mm, unsigned long va)
1265 struct {
1266 struct mmuext_op op;
1267 #ifdef CONFIG_SMP
1268 DECLARE_BITMAP(mask, num_processors);
1269 #else
1270 DECLARE_BITMAP(mask, NR_CPUS);
1271 #endif
1272 } *args;
1273 struct multicall_space mcs;
1275 trace_xen_mmu_flush_tlb_others(cpus, mm, va);
1277 if (cpumask_empty(cpus))
1278 return; /* nothing to do */
1280 mcs = xen_mc_entry(sizeof(*args));
1281 args = mcs.args;
1282 args->op.arg2.vcpumask = to_cpumask(args->mask);
1284 /* Remove us, and any offline CPUS. */
1285 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1286 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1288 if (va == TLB_FLUSH_ALL) {
1289 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1290 } else {
1291 args->op.cmd = MMUEXT_INVLPG_MULTI;
1292 args->op.arg1.linear_addr = va;
1295 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1297 xen_mc_issue(PARAVIRT_LAZY_MMU);
1300 static unsigned long xen_read_cr3(void)
1302 return percpu_read(xen_cr3);
1305 static void set_current_cr3(void *v)
1307 percpu_write(xen_current_cr3, (unsigned long)v);
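/*
 * Queue a NEW_BASEPTR (kernel) or NEW_USER_BASEPTR mmuext op for the
 * given cr3.  xen_current_cr3 is only updated via a multicall
 * callback once the batch carrying the hypercall has been issued.
 */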
1310 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1312 struct mmuext_op op;
1313 unsigned long mfn;
1315 trace_xen_mmu_write_cr3(kernel, cr3);
1317 if (cr3)
1318 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1319 else
1320 mfn = 0;
1322 WARN_ON(mfn == 0 && kernel);
1324 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1325 op.arg1.mfn = mfn;
1327 xen_extend_mmuext_op(&op);
1329 if (kernel) {
1330 percpu_write(xen_cr3, cr3);
1332 /* Update xen_current_cr3 once the batch has actually
1333 been submitted. */
1334 xen_mc_callback(set_current_cr3, (void *)cr3);
1338 static void xen_write_cr3(unsigned long cr3)
1340 BUG_ON(preemptible());
1342 xen_mc_batch(); /* disables interrupts */
1344 /* Update while interrupts are disabled, so it's atomic with
1345 respect to ipis */
1346 percpu_write(xen_cr3, cr3);
1348 __xen_write_cr3(true, cr3);
1350 #ifdef CONFIG_X86_64
1352 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1353 if (user_pgd)
1354 __xen_write_cr3(false, __pa(user_pgd));
1355 else
1356 __xen_write_cr3(false, 0);
1358 #endif
1360 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1363 static int xen_pgd_alloc(struct mm_struct *mm)
1365 pgd_t *pgd = mm->pgd;
1366 int ret = 0;
1368 BUG_ON(PagePinned(virt_to_page(pgd)));
1370 #ifdef CONFIG_X86_64
1372 struct page *page = virt_to_page(pgd);
1373 pgd_t *user_pgd;
1375 BUG_ON(page->private != 0);
1377 ret = -ENOMEM;
1379 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1380 page->private = (unsigned long)user_pgd;
1382 if (user_pgd != NULL) {
1383 user_pgd[pgd_index(VSYSCALL_START)] =
1384 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1385 ret = 0;
1388 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1390 #endif
1392 return ret;
1395 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1397 #ifdef CONFIG_X86_64
1398 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1400 if (user_pgd)
1401 free_page((unsigned long)user_pgd);
1402 #endif
1405 #ifdef CONFIG_X86_32
1406 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1408 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1409 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1410 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1411 pte_val_ma(pte));
1413 return pte;
1415 #else /* CONFIG_X86_64 */
1416 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1418 unsigned long pfn = pte_pfn(pte);
1421 * If the new pfn is within the range of the newly allocated
1422 * kernel pagetable, and it isn't being mapped into an
1423 * early_ioremap fixmap slot as a freshly allocated page, make sure
1424 * it is RO.
1426 if (((!is_early_ioremap_ptep(ptep) &&
1427 pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
1428 (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
1429 pte = pte_wrprotect(pte);
1431 return pte;
1433 #endif /* CONFIG_X86_64 */
1435 /* Init-time set_pte while constructing initial pagetables, which
1436 doesn't allow RO pagetable pages to be remapped RW */
1437 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1439 pte = mask_rw_pte(ptep, pte);
1441 xen_set_pte(ptep, pte);
1444 static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1446 struct mmuext_op op;
1447 op.cmd = cmd;
1448 op.arg1.mfn = pfn_to_mfn(pfn);
1449 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1450 BUG();
1453 /* Early in boot, while setting up the initial pagetable, assume
1454 everything is pinned. */
1455 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1457 #ifdef CONFIG_FLATMEM
1458 BUG_ON(mem_map); /* should only be used early */
1459 #endif
1460 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1461 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1464 /* Used for pmd and pud */
1465 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1467 #ifdef CONFIG_FLATMEM
1468 BUG_ON(mem_map); /* should only be used early */
1469 #endif
1470 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1473 /* Early release_pte assumes that all pts are pinned, since there's
1474 only init_mm and anything attached to that is pinned. */
1475 static void __init xen_release_pte_init(unsigned long pfn)
1477 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1478 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1481 static void __init xen_release_pmd_init(unsigned long pfn)
1483 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
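/*
 * Batched counterparts of pin_pagetable_pfn() and set_page_prot():
 * the same operations, but queued on the current multicall batch
 * instead of being issued as immediate hypercalls.
 */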
1486 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1488 struct multicall_space mcs;
1489 struct mmuext_op *op;
1491 mcs = __xen_mc_entry(sizeof(*op));
1492 op = mcs.args;
1493 op->cmd = cmd;
1494 op->arg1.mfn = pfn_to_mfn(pfn);
1496 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1499 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1501 struct multicall_space mcs;
1502 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1504 mcs = __xen_mc_entry(0);
1505 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1506 pfn_pte(pfn, prot), 0);
1509 /* This needs to make sure the new pte page is pinned iff it's being
1510 attached to a pinned pagetable. */
1511 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1512 unsigned level)
1514 bool pinned = PagePinned(virt_to_page(mm->pgd));
1516 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1518 if (pinned) {
1519 struct page *page = pfn_to_page(pfn);
1521 SetPagePinned(page);
1523 if (!PageHighMem(page)) {
1524 xen_mc_batch();
1526 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1528 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1529 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1531 xen_mc_issue(PARAVIRT_LAZY_MMU);
1532 } else {
1533 /* make sure there are no stray mappings of
1534 this page */
1535 kmap_flush_unused();
1540 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1542 xen_alloc_ptpage(mm, pfn, PT_PTE);
1545 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1547 xen_alloc_ptpage(mm, pfn, PT_PMD);
1550 /* This should never happen until we're OK to use struct page */
1551 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1553 struct page *page = pfn_to_page(pfn);
1554 bool pinned = PagePinned(page);
1556 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1558 if (pinned) {
1559 if (!PageHighMem(page)) {
1560 xen_mc_batch();
1562 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1563 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1565 __set_pfn_prot(pfn, PAGE_KERNEL);
1567 xen_mc_issue(PARAVIRT_LAZY_MMU);
1569 ClearPagePinned(page);
1573 static void xen_release_pte(unsigned long pfn)
1575 xen_release_ptpage(pfn, PT_PTE);
1578 static void xen_release_pmd(unsigned long pfn)
1580 xen_release_ptpage(pfn, PT_PMD);
1583 #if PAGETABLE_LEVELS == 4
1584 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1586 xen_alloc_ptpage(mm, pfn, PT_PUD);
1589 static void xen_release_pud(unsigned long pfn)
1591 xen_release_ptpage(pfn, PT_PUD);
1593 #endif
1595 void __init xen_reserve_top(void)
1597 #ifdef CONFIG_X86_32
1598 unsigned long top = HYPERVISOR_VIRT_START;
1599 struct xen_platform_parameters pp;
1601 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1602 top = pp.virt_start;
1604 reserve_top_address(-top);
1605 #endif /* CONFIG_X86_32 */
1609 * Like __va(), but returns address in the kernel mapping (which is
1610 * all we have until the physical memory mapping has been set up).
1612 static void *__ka(phys_addr_t paddr)
1614 #ifdef CONFIG_X86_64
1615 return (void *)(paddr + __START_KERNEL_map);
1616 #else
1617 return __va(paddr);
1618 #endif
1621 /* Convert a machine address to physical address */
1622 static unsigned long m2p(phys_addr_t maddr)
1624 phys_addr_t paddr;
1626 maddr &= PTE_PFN_MASK;
1627 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1629 return paddr;
1632 /* Convert a machine address to kernel virtual */
1633 static void *m2v(phys_addr_t maddr)
1635 return __ka(m2p(maddr));
1638 /* Set the page permissions on identity-mapped pages */
1639 static void set_page_prot(void *addr, pgprot_t prot)
1641 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1642 pte_t pte = pfn_pte(pfn, prot);
1644 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1645 BUG();
1648 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1650 unsigned pmdidx, pteidx;
1651 unsigned ident_pte;
1652 unsigned long pfn;
1654 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1655 PAGE_SIZE);
1657 ident_pte = 0;
1658 pfn = 0;
1659 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1660 pte_t *pte_page;
1662 /* Reuse or allocate a page of ptes */
1663 if (pmd_present(pmd[pmdidx]))
1664 pte_page = m2v(pmd[pmdidx].pmd);
1665 else {
1666 /* Check for free pte pages */
1667 if (ident_pte == LEVEL1_IDENT_ENTRIES)
1668 break;
1670 pte_page = &level1_ident_pgt[ident_pte];
1671 ident_pte += PTRS_PER_PTE;
1673 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1676 /* Install mappings */
1677 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1678 pte_t pte;
1680 #ifdef CONFIG_X86_32
1681 if (pfn > max_pfn_mapped)
1682 max_pfn_mapped = pfn;
1683 #endif
1685 if (!pte_none(pte_page[pteidx]))
1686 continue;
1688 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1689 pte_page[pteidx] = pte;
1693 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1694 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1696 set_page_prot(pmd, PAGE_KERNEL_RO);
1699 void __init xen_setup_machphys_mapping(void)
1701 struct xen_machphys_mapping mapping;
1703 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1704 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1705 machine_to_phys_nr = mapping.max_mfn + 1;
1706 } else {
1707 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1709 #ifdef CONFIG_X86_32
1710 if ((machine_to_phys_mapping + machine_to_phys_nr)
1711 < machine_to_phys_mapping)
1712 machine_to_phys_nr = (unsigned long *)NULL
1713 - machine_to_phys_mapping;
1714 #endif
1717 #ifdef CONFIG_X86_64
1718 static void convert_pfn_mfn(void *v)
1720 pte_t *pte = v;
1721 int i;
1723 /* All levels are converted the same way, so just treat them
1724 as ptes. */
1725 for (i = 0; i < PTRS_PER_PTE; i++)
1726 pte[i] = xen_make_pte(pte[i].pte);
1730 * Set up the initial kernel pagetable.
1732 * We can construct this by grafting the Xen provided pagetable into
1733 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1734 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1735 * means that only the kernel has a physical mapping to start with -
1736 * but that's enough to get __va working. We need to fill in the rest
1737 * of the physical mapping once some sort of allocator has been set
1738 * up.
1740 pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
1741 unsigned long max_pfn)
1743 pud_t *l3;
1744 pmd_t *l2;
1746 /* max_pfn_mapped is the last pfn mapped in the initial memory
1747 * mappings. Considering that on Xen after the kernel mappings we
1748 * have the mappings of some pages that don't exist in pfn space, we
1749 * set max_pfn_mapped to the last real pfn mapped. */
1750 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1752 /* Zap identity mapping */
1753 init_level4_pgt[0] = __pgd(0);
1755 /* Pre-constructed entries are in pfn, so convert to mfn */
1756 convert_pfn_mfn(init_level4_pgt);
1757 convert_pfn_mfn(level3_ident_pgt);
1758 convert_pfn_mfn(level3_kernel_pgt);
1760 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1761 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1763 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1764 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1766 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1767 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1768 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1770 /* Set up identity map */
1771 xen_map_identity_early(level2_ident_pgt, max_pfn);
1773 /* Make pagetable pieces RO */
1774 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1775 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1776 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1777 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1778 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1779 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1781 /* Pin down new L4 */
1782 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1783 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1785 /* Unpin Xen-provided one */
1786 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1788 /* Switch over */
1789 pgd = init_level4_pgt;
1792 * At this stage there can be no user pgd, and no page
1793 * structure to attach it to, so make sure we just set kernel
1794 * pgd.
1796 xen_mc_batch();
1797 __xen_write_cr3(true, __pa(pgd));
1798 xen_mc_issue(PARAVIRT_LAZY_CPU);
1800 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
1801 __pa(xen_start_info->pt_base +
1802 xen_start_info->nr_pt_frames * PAGE_SIZE),
1803 "XEN PAGETABLES");
1805 return pgd;
1807 #else /* !CONFIG_X86_64 */
1808 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1809 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1811 static void __init xen_write_cr3_init(unsigned long cr3)
1813 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1815 BUG_ON(read_cr3() != __pa(initial_page_table));
1816 BUG_ON(cr3 != __pa(swapper_pg_dir));
1819 * We are switching to swapper_pg_dir for the first time (from
1820 * initial_page_table) and therefore need to mark that page
1821 * read-only and then pin it.
1823 * Xen disallows sharing of kernel PMDs for PAE
1824 * guests. Therefore we must copy the kernel PMD from
1825 * initial_page_table into a new kernel PMD to be used in
1826 * swapper_pg_dir.
1828 swapper_kernel_pmd =
1829 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1830 memcpy(swapper_kernel_pmd, initial_kernel_pmd,
1831 sizeof(pmd_t) * PTRS_PER_PMD);
1832 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1833 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1834 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1836 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1837 xen_write_cr3(cr3);
1838 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1840 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1841 PFN_DOWN(__pa(initial_page_table)));
1842 set_page_prot(initial_page_table, PAGE_KERNEL);
1843 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1845 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1848 pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
1849 unsigned long max_pfn)
1851 pmd_t *kernel_pmd;
1853 initial_kernel_pmd =
1854 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1856 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1857 xen_start_info->nr_pt_frames * PAGE_SIZE +
1858 512*1024);
1860 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1861 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
1863 xen_map_identity_early(initial_kernel_pmd, max_pfn);
1865 memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
1866 initial_page_table[KERNEL_PGD_BOUNDARY] =
1867 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
1869 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
1870 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
1871 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1873 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1875 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1876 PFN_DOWN(__pa(initial_page_table)));
1877 xen_write_cr3(__pa(initial_page_table));
1879 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
1880 __pa(xen_start_info->pt_base +
1881 xen_start_info->nr_pt_frames * PAGE_SIZE),
1882 "XEN PAGETABLES");
1884 return initial_page_table;
1886 #endif /* CONFIG_X86_64 */
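/*
 * dummy_mapping backs fixmap slots (such as the local and IO APIC)
 * that must have a pte but must never reach real hardware under Xen;
 * xen_set_fixmap() points those entries at this page instead.
 */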
1888 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
1890 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
1892 pte_t pte;
1894 phys >>= PAGE_SHIFT;
1896 switch (idx) {
1897 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1898 #ifdef CONFIG_X86_F00F_BUG
1899 case FIX_F00F_IDT:
1900 #endif
1901 #ifdef CONFIG_X86_32
1902 case FIX_WP_TEST:
1903 case FIX_VDSO:
1904 # ifdef CONFIG_HIGHMEM
1905 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1906 # endif
1907 #else
1908 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
1909 case VVAR_PAGE:
1910 #endif
1911 case FIX_TEXT_POKE0:
1912 case FIX_TEXT_POKE1:
1913 /* All local page mappings */
1914 pte = pfn_pte(phys, prot);
1915 break;
1917 #ifdef CONFIG_X86_LOCAL_APIC
1918 case FIX_APIC_BASE: /* maps dummy local APIC */
1919 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
1920 break;
1921 #endif
1923 #ifdef CONFIG_X86_IO_APIC
1924 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
1926 * We just don't map the IO APIC - all access is via
1927 * hypercalls. Keep the address in the pte for reference.
1929 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
1930 break;
1931 #endif
1933 case FIX_PARAVIRT_BOOTMAP:
1934 /* This is an MFN, but it isn't an IO mapping from the
1935 IO domain */
1936 pte = mfn_pte(phys, prot);
1937 break;
1939 default:
1940 /* By default, set_fixmap is used for hardware mappings */
1941 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
1942 break;
1945 __native_set_fixmap(idx, pte);
1947 #ifdef CONFIG_X86_64
1948 /* Replicate changes to map the vsyscall page into the user
1949 pagetable vsyscall mapping. */
1950 if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
1951 idx == VVAR_PAGE) {
1952 unsigned long vaddr = __fix_to_virt(idx);
1953 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
1955 #endif
1958 void __init xen_ident_map_ISA(void)
1960 unsigned long pa;
1963 * If we're dom0, then linear map the ISA machine addresses into
1964 * the kernel's address space.
1966 if (!xen_initial_domain())
1967 return;
1969 xen_raw_printk("Xen: setup ISA identity maps\n");
1971 for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
1972 pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
1974 if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
1975 BUG();
1978 xen_flush_tlb();
1981 static void __init xen_post_allocator_init(void)
1983 #ifdef CONFIG_XEN_DEBUG
1984 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
1985 #endif
1986 pv_mmu_ops.set_pte = xen_set_pte;
1987 pv_mmu_ops.set_pmd = xen_set_pmd;
1988 pv_mmu_ops.set_pud = xen_set_pud;
1989 #if PAGETABLE_LEVELS == 4
1990 pv_mmu_ops.set_pgd = xen_set_pgd;
1991 #endif
1993 /* This will work as long as patching hasn't happened yet
1994 (which it hasn't) */
1995 pv_mmu_ops.alloc_pte = xen_alloc_pte;
1996 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
1997 pv_mmu_ops.release_pte = xen_release_pte;
1998 pv_mmu_ops.release_pmd = xen_release_pmd;
1999 #if PAGETABLE_LEVELS == 4
2000 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2001 pv_mmu_ops.release_pud = xen_release_pud;
2002 #endif
2004 #ifdef CONFIG_X86_64
2005 SetPagePinned(virt_to_page(level3_user_vsyscall));
2006 #endif
2007 xen_mark_init_mm_pinned();
2010 static void xen_leave_lazy_mmu(void)
2012 preempt_disable();
2013 xen_mc_flush();
2014 paravirt_leave_lazy_mmu();
2015 preempt_enable();
2018 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2019 .read_cr2 = xen_read_cr2,
2020 .write_cr2 = xen_write_cr2,
2022 .read_cr3 = xen_read_cr3,
2023 #ifdef CONFIG_X86_32
2024 .write_cr3 = xen_write_cr3_init,
2025 #else
2026 .write_cr3 = xen_write_cr3,
2027 #endif
2029 .flush_tlb_user = xen_flush_tlb,
2030 .flush_tlb_kernel = xen_flush_tlb,
2031 .flush_tlb_single = xen_flush_tlb_single,
2032 .flush_tlb_others = xen_flush_tlb_others,
2034 .pte_update = paravirt_nop,
2035 .pte_update_defer = paravirt_nop,
2037 .pgd_alloc = xen_pgd_alloc,
2038 .pgd_free = xen_pgd_free,
2040 .alloc_pte = xen_alloc_pte_init,
2041 .release_pte = xen_release_pte_init,
2042 .alloc_pmd = xen_alloc_pmd_init,
2043 .release_pmd = xen_release_pmd_init,
2045 .set_pte = xen_set_pte_init,
2046 .set_pte_at = xen_set_pte_at,
2047 .set_pmd = xen_set_pmd_hyper,
2049 .ptep_modify_prot_start = __ptep_modify_prot_start,
2050 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2052 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2053 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2055 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2056 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2058 #ifdef CONFIG_X86_PAE
2059 .set_pte_atomic = xen_set_pte_atomic,
2060 .pte_clear = xen_pte_clear,
2061 .pmd_clear = xen_pmd_clear,
2062 #endif /* CONFIG_X86_PAE */
2063 .set_pud = xen_set_pud_hyper,
2065 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2066 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2068 #if PAGETABLE_LEVELS == 4
2069 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2070 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2071 .set_pgd = xen_set_pgd_hyper,
2073 .alloc_pud = xen_alloc_pmd_init,
2074 .release_pud = xen_release_pmd_init,
2075 #endif /* PAGETABLE_LEVELS == 4 */
2077 .activate_mm = xen_activate_mm,
2078 .dup_mmap = xen_dup_mmap,
2079 .exit_mmap = xen_exit_mmap,
2081 .lazy_mode = {
2082 .enter = paravirt_enter_lazy_mmu,
2083 .leave = xen_leave_lazy_mmu,
2086 .set_fixmap = xen_set_fixmap,
2089 void __init xen_init_mmu_ops(void)
2091 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2092 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2093 pv_mmu_ops = xen_mmu_ops;
2095 memset(dummy_mapping, 0xff, PAGE_SIZE);
2098 /* Protected by xen_reservation_lock. */
2099 #define MAX_CONTIG_ORDER 9 /* 2MB */
2100 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2102 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
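/*
 * Clear the ptes covering @vaddr .. @vaddr + (1 << order) pages and
 * mark the corresponding pfns invalid in the p2m, optionally
 * recording the old mfns (in_frames) and/or the pfns (out_frames) for
 * a subsequent exchange hypercall.
 */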
2103 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2104 unsigned long *in_frames,
2105 unsigned long *out_frames)
2107 int i;
2108 struct multicall_space mcs;
2110 xen_mc_batch();
2111 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2112 mcs = __xen_mc_entry(0);
2114 if (in_frames)
2115 in_frames[i] = virt_to_mfn(vaddr);
2117 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2118 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2120 if (out_frames)
2121 out_frames[i] = virt_to_pfn(vaddr);
2123 xen_mc_issue(0);
2127 * Update the pfn-to-mfn mappings for a virtual address range, either to
2128 * point to an array of mfns, or contiguously from a single starting
2129 * mfn.
2131 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2132 unsigned long *mfns,
2133 unsigned long first_mfn)
2135 unsigned i, limit;
2136 unsigned long mfn;
2138 xen_mc_batch();
2140 limit = 1u << order;
2141 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2142 struct multicall_space mcs;
2143 unsigned flags;
2145 mcs = __xen_mc_entry(0);
2146 if (mfns)
2147 mfn = mfns[i];
2148 else
2149 mfn = first_mfn + i;
2151 if (i < (limit - 1))
2152 flags = 0;
2153 else {
2154 if (order == 0)
2155 flags = UVMF_INVLPG | UVMF_ALL;
2156 else
2157 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2160 MULTI_update_va_mapping(mcs.mc, vaddr,
2161 mfn_pte(mfn, PAGE_KERNEL), flags);
2163 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2166 xen_mc_issue(0);
2170 * Perform the hypercall to exchange a region of our pfns to point to
2171 * memory with the required contiguous alignment. Takes the pfns as
2172 * input, and populates mfns as output.
2174 * Returns a success code indicating whether the hypervisor was able to
2175 * satisfy the request or not.
2177 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2178 unsigned long *pfns_in,
2179 unsigned long extents_out,
2180 unsigned int order_out,
2181 unsigned long *mfns_out,
2182 unsigned int address_bits)
2184 long rc;
2185 int success;
2187 struct xen_memory_exchange exchange = {
2188 .in = {
2189 .nr_extents = extents_in,
2190 .extent_order = order_in,
2191 .extent_start = pfns_in,
2192 .domid = DOMID_SELF
2194 .out = {
2195 .nr_extents = extents_out,
2196 .extent_order = order_out,
2197 .extent_start = mfns_out,
2198 .address_bits = address_bits,
2199 .domid = DOMID_SELF
2203 BUG_ON(extents_in << order_in != extents_out << order_out);
2205 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2206 success = (exchange.nr_exchanged == extents_in);
2208 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2209 BUG_ON(success && (rc != 0));
2211 return success;
2214 int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2215 unsigned int address_bits)
2217 unsigned long *in_frames = discontig_frames, out_frame;
2218 unsigned long flags;
2219 int success;
2222 * Currently an auto-translated guest will not perform I/O, nor will
2223 * it require PAE page directories below 4GB. Therefore any calls to
2224 * this function are redundant and can be ignored.
2227 if (xen_feature(XENFEAT_auto_translated_physmap))
2228 return 0;
2230 if (unlikely(order > MAX_CONTIG_ORDER))
2231 return -ENOMEM;
2233 memset((void *) vstart, 0, PAGE_SIZE << order);
2235 spin_lock_irqsave(&xen_reservation_lock, flags);
2237 /* 1. Zap current PTEs, remembering MFNs. */
2238 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2240 /* 2. Get a new contiguous memory extent. */
2241 out_frame = virt_to_pfn(vstart);
2242 success = xen_exchange_memory(1UL << order, 0, in_frames,
2243 1, order, &out_frame,
2244 address_bits);
2246 /* 3. Map the new extent in place of old pages. */
2247 if (success)
2248 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2249 else
2250 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2252 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2254 return success ? 0 : -ENOMEM;
2256 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2258 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2260 unsigned long *out_frames = discontig_frames, in_frame;
2261 unsigned long flags;
2262 int success;
2264 if (xen_feature(XENFEAT_auto_translated_physmap))
2265 return;
2267 if (unlikely(order > MAX_CONTIG_ORDER))
2268 return;
2270 memset((void *) vstart, 0, PAGE_SIZE << order);
2272 spin_lock_irqsave(&xen_reservation_lock, flags);
2274 /* 1. Find start MFN of contiguous extent. */
2275 in_frame = virt_to_mfn(vstart);
2277 /* 2. Zap current PTEs. */
2278 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2280 /* 3. Do the exchange for non-contiguous MFNs. */
2281 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2282 0, out_frames, 0);
2284 /* 4. Map new pages in place of old pages. */
2285 if (success)
2286 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2287 else
2288 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2290 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2292 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2294 #ifdef CONFIG_XEN_PVHVM
2295 static void xen_hvm_exit_mmap(struct mm_struct *mm)
2297 struct xen_hvm_pagetable_dying a;
2298 int rc;
2300 a.domid = DOMID_SELF;
2301 a.gpa = __pa(mm->pgd);
2302 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2303 WARN_ON_ONCE(rc < 0);
2306 static int is_pagetable_dying_supported(void)
2308 struct xen_hvm_pagetable_dying a;
2309 int rc = 0;
2311 a.domid = DOMID_SELF;
2312 a.gpa = 0x00;
2313 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2314 if (rc < 0) {
2315 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2316 return 0;
2318 return 1;
2321 void __init xen_hvm_init_mmu_ops(void)
2323 if (is_pagetable_dying_supported())
2324 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2326 #endif
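/*
 * Machinery for mapping a run of foreign (@domid) machine frames into
 * a vma, presumably on behalf of the privcmd driver: ptes are built
 * by remap_area_mfn_pte_fn() and pushed to Xen in batches of
 * REMAP_BATCH_SIZE mmu_update requests.
 */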
2328 #define REMAP_BATCH_SIZE 16
2330 struct remap_data {
2331 unsigned long mfn;
2332 pgprot_t prot;
2333 struct mmu_update *mmu_update;
2336 static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2337 unsigned long addr, void *data)
2339 struct remap_data *rmd = data;
2340 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2342 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2343 rmd->mmu_update->val = pte_val_ma(pte);
2344 rmd->mmu_update++;
2346 return 0;
2349 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2350 unsigned long addr,
2351 unsigned long mfn, int nr,
2352 pgprot_t prot, unsigned domid)
2354 struct remap_data rmd;
2355 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2356 int batch;
2357 unsigned long range;
2358 int err = 0;
2360 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2362 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
2363 (VM_PFNMAP | VM_RESERVED | VM_IO)));
2365 rmd.mfn = mfn;
2366 rmd.prot = prot;
2368 while (nr) {
2369 batch = min(REMAP_BATCH_SIZE, nr);
2370 range = (unsigned long)batch << PAGE_SHIFT;
2372 rmd.mmu_update = mmu_update;
2373 err = apply_to_page_range(vma->vm_mm, addr, range,
2374 remap_area_mfn_pte_fn, &rmd);
2375 if (err)
2376 goto out;
2378 err = -EFAULT;
2379 if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
2380 goto out;
2382 nr -= batch;
2383 addr += range;
2386 err = 0;
2387 out:
2389 flush_tlb_all();
2391 return err;
2393 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
2395 #ifdef CONFIG_XEN_DEBUG_FS
2396 static int p2m_dump_open(struct inode *inode, struct file *filp)
2398 return single_open(filp, p2m_dump_show, NULL);
2401 static const struct file_operations p2m_dump_fops = {
2402 .open = p2m_dump_open,
2403 .read = seq_read,
2404 .llseek = seq_lseek,
2405 .release = single_release,
2407 #endif /* CONFIG_XEN_DEBUG_FS */