/*P:700
 * The pagetable code, on the other hand, still shows the scars of
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here then point the CPU to the
 * converted Guest pages when running the Guest.
:*/
/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "lg.h"
/*M:008
 * We hold reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root.
:*/
/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest, or three-level with PAE.  If
 * you're not entirely comfortable with virtual addresses, physical addresses
 * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
 * Table Handling" (with diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
 *  (i) Looking up a page table entry when the Guest faults,
 *  (ii) Making sure the Guest stack is mapped,
 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
:*/
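
/*
 * An illustrative aside, not part of the driver: without PAE, a 32-bit
 * virtual address is just three bit-fields glued together.  This
 * hypothetical helper (the name is ours, not the kernel's) spells out the
 * arithmetic that pgd_index() and pte_index() do for us below:
 */
static __maybe_unused void decompose_vaddr_example(unsigned long vaddr)
{
	unsigned int pgd_idx = vaddr >> PGDIR_SHIFT;	     /* top 10 bits */
	unsigned int pte_idx = (vaddr >> PAGE_SHIFT) & 1023; /* next 10 bits */
	unsigned int offset  = vaddr & ~PAGE_MASK;	     /* low 12 bits */

	printk(KERN_DEBUG "%#lx = pgd %u / pte %u / offset %#x\n",
	       vaddr, pgd_idx, pte_idx, offset);
}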
/*
 * The Switcher uses the complete top PTE page.  That's 1024 PTE entries (4MB)
 * or 512 PTE entries with PAE (2MB).
 */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)

/*
 * For PAE we need the PMD index as well.  We use the last 2MB, so we
 * will need the last pmd entry of the last pmd page.
 */
#ifdef CONFIG_X86_PAE
#define SWITCHER_PMD_INDEX	(PTRS_PER_PMD - 1)
#define CHECK_GPGD_MASK _PAGE_PRESENT
#else
#define CHECK_GPGD_MASK _PAGE_TABLE
#endif
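
/*
 * Concretely (illustrative arithmetic, not extra code): without PAE,
 * PTRS_PER_PGD is 1024, so SWITCHER_PGD_INDEX is 1023 and the Switcher
 * owns the top 4MB of virtual address space; with PAE, we take the last
 * entry of the last PMD page, covering the top 2MB instead.
 */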
/*
 * We actually need a separate PTE page for each CPU.  Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU.
 */
static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
/*H:320
 * The page table code is curly enough to need helper functions to keep it
 * clear and clean.  The kernel itself provides many of them; one advantage
 * of insisting that the Guest and Host use the same CONFIG_PAE setting.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address.  Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one).
 */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
	unsigned int index = pgd_index(vaddr);

	/* Return a pointer to the index'th pgd entry for the i'th page table. */
	return &cpu->lg->pgdirs[i].pgdir[index];
}
#ifdef CONFIG_X86_PAE
/*
 * This routine then takes the PGD entry given above, which contains the
 * address of the PMD page.  It then returns a pointer to the PMD entry for the
 * given address.
 */
static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
	unsigned int index = pmd_index(vaddr);
	pmd_t *page;

	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
	page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

	return &page[index];
}
#endif
/*
 * This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page.  It then returns a
 * pointer to the PTE entry for the given address.
 */
static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
	pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
	pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);

	/* You should never call this if the PMD entry wasn't valid */
	BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif

	return &page[pte_index(vaddr)];
}
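
/*
 * A hedged sketch (the helper name is ours) of how the two shadow helpers
 * chain together: find the PGD slot, then follow it down to the PTE.
 * demand_page() below does exactly this walk, with all the presence checks
 * and allocation these few lines leave out:
 */
static __maybe_unused pte_t *shadow_walk_example(struct lg_cpu *cpu,
						 unsigned long vaddr)
{
	pgd_t *spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);

	/* Assumes the caller already checked _PAGE_PRESENT on *spgd! */
	return spte_addr(cpu, *spgd, vaddr);
}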
/*
 * These functions are just like the above, except they access the Guest
 * page tables.  Hence they return a Guest address.
 */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
	unsigned int index = vaddr >> (PGDIR_SHIFT);
	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}

#ifdef CONFIG_X86_PAE
/* Follow the PGD to the PMD. */
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + pmd_index(vaddr) * sizeof(pmd_t);
}

/* Follow the PMD to the PTE. */
static unsigned long gpte_addr(struct lg_cpu *cpu,
			       pmd_t gpmd, unsigned long vaddr)
{
	unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;

	BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
	return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#else
/* Follow the PGD to the PTE (no mid-level for !PAE). */
static unsigned long gpte_addr(struct lg_cpu *cpu,
			       pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;

	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#endif
/*:*/
/*M:007
 * get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting).
:*/
/*H:350
 * This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back.
 */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
	struct page *page;

	/* gup me one page at this address please! */
	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
		return page_to_pfn(page);

	/* This value indicates failure. */
	return -1UL;
}
/*H:340
 * Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number.
 */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
	unsigned long pfn, base, flags;

	/*
	 * The Guest sets the global flag, because it thinks that it is using
	 * PGE.  We only told it to use PGE so it would tell us whether it was
	 * flushing a kernel mapping or a userspace mapping.  We don't actually
	 * use the global bit, so throw it away.
	 */
	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

	/* The Guest's pages are offset inside the Launcher. */
	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

	/*
	 * We need a temporary "unsigned long" variable to hold the answer from
	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
	 * page, given the virtual number.
	 */
	pfn = get_pfn(base + pte_pfn(gpte), write);
	if (pfn == -1UL) {
		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
		/*
		 * When we destroy the Guest, we'll go through the shadow page
		 * tables and release_pte() them.  Make sure we don't think
		 * this one is valid!
		 */
		flags = 0;
	}
	/* Now we assemble our shadow PTE from the page number and flags. */
	return pfn_pte(pfn, __pgprot(flags));
}
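
/*
 * A worked example with made-up numbers: if the Launcher mapped Guest
 * memory at mem_base 0x10000000 (so base is 0x10000 pages) and the Guest
 * PTE names guest page 5, get_pfn() is asked to pin Launcher-virtual page
 * 0x10005, and whatever real pfn it returns lands in the shadow PTE, with
 * the Guest's flags (minus _PAGE_GLOBAL) copied across.
 */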
/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
	/*
	 * Remember that get_user_pages_fast() took a reference to the page, in
	 * get_pfn()?  We have to put it back now.
	 */
	if (pte_flags(pte) & _PAGE_PRESENT)
		put_page(pte_page(pte));
}
/*:*/
static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
	if ((pte_flags(gpte) & _PAGE_PSE) ||
	    pte_pfn(gpte) >= cpu->lg->pfn_limit)
		kill_guest(cpu, "bad page table entry");
}

static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
	if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
	    (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
		kill_guest(cpu, "bad page directory entry");
}

#ifdef CONFIG_X86_PAE
static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
{
	if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
	    (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
		kill_guest(cpu, "bad page middle directory entry");
}
#endif
/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true.  Otherwise, it was a real fault and we need to tell the Guest.
 */
bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
{
	pgd_t gpgd;
	pgd_t *spgd;
	unsigned long gpte_ptr;
	pte_t gpte;
	pte_t *spte;

	/* Mid level for PAE. */
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
	pmd_t gpmd;
#endif

	/* We never demand page the Switcher, so trying is a mistake. */
	if (vaddr >= switcher_addr)
		return false;

	/* First step: get the top-level Guest page table entry. */
	if (unlikely(cpu->linear_pages)) {
		/* Faking up a linear mapping. */
		gpgd = __pgd(CHECK_GPGD_MASK);
	} else {
		gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
		/* Toplevel not present?  We can't map it in. */
		if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
			return false;
	}

	/* Now look at the matching shadow entry. */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
		/*
		 * This is not really the Guest's fault, but killing it is
		 * simple for this corner case.
		 */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pte page");
			return false;
		}
		/* We check that the Guest pgd is OK. */
		check_gpgd(cpu, gpgd);
		/*
		 * And we copy the flags to the shadow PGD entry.  The page
		 * number in the shadow PGD is the page we just allocated.
		 */
		set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
	}

#ifdef CONFIG_X86_PAE
	if (unlikely(cpu->linear_pages)) {
		/* Faking up a linear mapping. */
		gpmd = __pmd(_PAGE_TABLE);
	} else {
		gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
		/* Middle level not present?  We can't map it in. */
		if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
			return false;
	}

	/* Now look at the matching shadow entry. */
	spmd = spmd_addr(cpu, *spgd, vaddr);

	if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);

		/*
		 * This is not really the Guest's fault, but killing it is
		 * simple for this corner case.
		 */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pte page");
			return false;
		}

		/* We check that the Guest pmd is OK. */
		check_gpmd(cpu, gpmd);

		/*
		 * And we copy the flags to the shadow PMD entry.  The page
		 * number in the shadow PMD is the page we just allocated.
		 */
		set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
	}

	/*
	 * OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later.
	 */
	gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
#else
	/*
	 * OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later.
	 */
	gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
#endif

	if (unlikely(cpu->linear_pages)) {
		/* Linear?  Make up a PTE which points to same page. */
		gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
	} else {
		/* Read the actual PTE value. */
		gpte = lgread(cpu, gpte_ptr, pte_t);
	}

	/* If this page isn't in the Guest page tables, we can't page it in. */
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		return false;

	/*
	 * Check they're not trying to write to a page the Guest wants
	 * read-only (bit 2 of errcode == write).
	 */
	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
		return false;

	/* User access to a kernel-only page? (bit 3 == user access) */
	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
		return false;

	/*
	 * Check that the Guest PTE flags are OK, and the page number is below
	 * the pfn_limit (ie. not mapping the Launcher binary).
	 */
	check_gpte(cpu, gpte);

	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
	gpte = pte_mkyoung(gpte);
	if (errcode & 2)
		gpte = pte_mkdirty(gpte);

	/* Get the pointer to the shadow PTE entry we're going to set. */
	spte = spte_addr(cpu, *spgd, vaddr);

	/*
	 * If there was a valid shadow PTE entry here before, we release it.
	 * This can happen with a write to a previously read-only entry.
	 */
	release_pte(*spte);

	/*
	 * If this is a write, we insist that the Guest page is writable (the
	 * final arg to gpte_to_spte()).
	 */
	if (pte_dirty(gpte))
		*spte = gpte_to_spte(cpu, gpte, 1);
	else
		/*
		 * If this is a read, don't set the "writable" bit in the page
		 * table entry, even if the Guest says it's writable.  That way
		 * we will come back here when a write does actually occur, so
		 * we can update the Guest's _PAGE_DIRTY flag.
		 */
		set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));

	/*
	 * Finally, we write the Guest PTE entry back: we've set the
	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
	 */
	if (likely(!cpu->linear_pages))
		lgwrite(cpu, gpte_ptr, pte_t, gpte);

	/*
	 * The fault is fixed, the page table is populated, the mapping
	 * manipulated, the result returned and the code complete.  A small
	 * delay and a trace of alliteration are the only indications the Guest
	 * has that a page fault occurred at all.
	 */
	return true;
}
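
/*
 * For flavour, a hedged sketch of the caller's side (the dispatch lives in
 * the arch code; the field names here are assumptions, quoted from memory):
 * on an intercepted Guest page fault we try the fixup first, and only
 * deliver trap 14 to the Guest if it was a genuine fault:
 *
 *	if (demand_page(cpu, cpu->arch.last_pagefault, cpu->regs->errcode))
 *		return;
 *
 * ...and otherwise we reflect the fault into the Guest as usual.
 */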
/*H:360
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable?
 */
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
	pgd_t *spgd;
	unsigned long flags;
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
#endif

	/* You can't put your stack in the Switcher! */
	if (vaddr >= switcher_addr)
		return false;

	/* Look at the current top level entry: is it present? */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
		return false;

#ifdef CONFIG_X86_PAE
	spmd = spmd_addr(cpu, *spgd, vaddr);
	if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
		return false;
#endif

	/*
	 * Check the flags on the pte entry itself: it must be present and
	 * writable.
	 */
	flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));

	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}
/*
 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write").
 */
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
		kill_guest(cpu, "bad stack page %#lx", vaddr);
}
/*:*/
#ifdef CONFIG_X86_PAE
static void release_pmd(pmd_t *spmd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pmd_flags(*spmd) & _PAGE_PRESENT) {
		unsigned int i;
		pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PMD entry so we never release it twice. */
		set_pmd(spmd, __pmd(0));
	}
}

static void release_pgd(pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

		for (i = 0; i < PTRS_PER_PMD; i++)
			release_pmd(&pmdpage[i]);

		/* Now we can free the page of PMDs */
		free_page((long)pmdpage);
		/* And zero out the PGD entry so we never release it twice. */
		set_pgd(spgd, __pgd(0));
	}
}

#else /* !CONFIG_X86_PAE */
/*H:450
 * If we chase down the release_pgd() code, the non-PAE version looks like
 * this.  The PAE version is almost identical, but instead of calling
 * release_pte it calls release_pmd(), which looks much like this.
 */
static void release_pgd(pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		/*
		 * Converting the pfn to find the actual PTE page is easy: turn
		 * the page number into a physical address, then convert to a
		 * virtual address (easy for kernel pages like this one).
		 */
		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PGD entry so we never release it twice. */
		*spgd = __pgd(0);
	}
}
#endif
/*H:445
 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address.
 */
static void flush_user_mappings(struct lguest *lg, int idx)
{
	unsigned int i;
	/* Release every pgd entry up to the kernel's address. */
	for (i = 0; i < pgd_index(lg->kernel_address); i++)
		release_pgd(lg->pgdirs[idx].pgdir + i);
}
/*H:440
 * (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed.
 */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
	/* Drop the userspace part of the current page table. */
	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
/*:*/
/* We walk down the guest page tables to get a guest-physical address */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
	pgd_t gpgd;
	pte_t gpte;
#ifdef CONFIG_X86_PAE
	pmd_t gpmd;
#endif

	/* Still not set up?  Just map 1:1. */
	if (unlikely(cpu->linear_pages))
		return vaddr;

	/* First step: get the top-level Guest page table entry. */
	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
	/* Toplevel not present?  We can't map it in. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
		kill_guest(cpu, "Bad address %#lx", vaddr);
		return -1UL;
	}

#ifdef CONFIG_X86_PAE
	gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
	if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
		kill_guest(cpu, "Bad address %#lx", vaddr);
	gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
#else
	gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
#endif
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		kill_guest(cpu, "Bad address %#lx", vaddr);

	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}
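
/*
 * A hedged usage sketch (hypothetical helper; the real readers go through
 * lgread()): once guest_pa() has produced a Guest-physical address, the
 * bytes themselves live inside the Launcher's mapping at mem_base plus
 * that address, which is how the Host peeks at Guest memory:
 */
static __maybe_unused unsigned long
guest_addr_in_launcher(struct lg_cpu *cpu, unsigned long vaddr)
{
	/* guest_pa() kills the Guest (and returns -1UL) on a bad address. */
	return (unsigned long)cpu->lg->mem_base + guest_pa(cpu, vaddr);
}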
/*
 * We keep several page tables.  This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us.
 */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
			break;
	return i;
}
/*H:435
 * And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir.
 */
static unsigned int new_pgdir(struct lg_cpu *cpu,
			      unsigned long gpgdir,
			      int *blank_pgdir)
{
	unsigned int next;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd_table;
#endif

	/*
	 * We pick one entry at random to throw out.  Choosing the Least
	 * Recently Used might be better, but this is easy.
	 */
	next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
	/* If it's never been allocated at all before, try now. */
	if (!cpu->lg->pgdirs[next].pgdir) {
		cpu->lg->pgdirs[next].pgdir =
					(pgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have */
		if (!cpu->lg->pgdirs[next].pgdir)
			next = cpu->cpu_pgd;
		else {
#ifdef CONFIG_X86_PAE
			/*
			 * In PAE mode, allocate a pmd page and populate the
			 * last pgd entry.
			 */
			pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
			if (!pmd_table) {
				free_page((long)cpu->lg->pgdirs[next].pgdir);
				set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
				next = cpu->cpu_pgd;
			} else {
				set_pgd(cpu->lg->pgdirs[next].pgdir +
					SWITCHER_PGD_INDEX,
					__pgd(__pa(pmd_table) | _PAGE_PRESENT));
				/*
				 * This is a blank page, so there are no kernel
				 * mappings: caller must map the stack!
				 */
				*blank_pgdir = 1;
			}
#else
			*blank_pgdir = 1;
#endif
		}
	}
	/* Record which Guest toplevel this shadows. */
	cpu->lg->pgdirs[next].gpgdir = gpgdir;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(cpu->lg, next);

	return next;
}
/*H:470
 * Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings.  This is used
 * when we destroy the Guest.
 */
static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

	/* Every shadow pagetable this Guest has */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir) {
#ifdef CONFIG_X86_PAE
			pgd_t *spgd;
			pmd_t *pmdpage;
			unsigned int k;

			/* Get the last pmd page. */
			spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
			pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

			/*
			 * And release the pmd entries of that pmd page,
			 * except for the switcher pmd.
			 */
			for (k = 0; k < SWITCHER_PMD_INDEX; k++)
				release_pmd(&pmdpage[k]);
#endif
			/* Every PGD entry except the Switcher at the top */
			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
				release_pgd(lg->pgdirs[i].pgdir + j);
		}
}
/*
 * We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This traps the Guest in amber for a while as
 * everything faults back in, but it's rare.
 */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
	release_all_pagetables(cpu->lg);
	/* We need the Guest kernel stack mapped again. */
	pin_stack_pages(cpu);
}
/*H:430
 * (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see
 * what happens when the Guest changes page tables (ie. changes the top-level
 * pgdir).  This occurs on almost every context switch.
 */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	/*
	 * The very first time they call this, we're actually running without
	 * any page tables; we've been making it up.  Throw them away now.
	 */
	if (unlikely(cpu->linear_pages)) {
		release_all_pagetables(cpu->lg);
		cpu->linear_pages = false;
		/* Force allocation of a new pgdir. */
		newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
	} else {
		/* Look to see if we have this one already. */
		newpgdir = find_pgdir(cpu->lg, pgtable);
	}

	/*
	 * If not, we allocate or mug an existing one: if it's a fresh one,
	 * repin gets set to 1.
	 */
	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
		newpgdir = new_pgdir(cpu, pgtable, &repin);
	/* Change the current pgd index to the new one. */
	cpu->cpu_pgd = newpgdir;
	/* If it was completely blank, we map in the Guest kernel stack */
	if (repin)
		pin_stack_pages(cpu);
}
/*:*/
/*M:009
 * Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem.  In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed.  It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind.
:*/
/*H:420
 * This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in.  We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lg_cpu *cpu, int idx,
		       unsigned long vaddr, pte_t gpte)
{
	/* Look up the matching shadow page directory entry. */
	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
#endif

	/* If the top level isn't present, there's no entry to update. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
#ifdef CONFIG_X86_PAE
		spmd = spmd_addr(cpu, *spgd, vaddr);
		if (pmd_flags(*spmd) & _PAGE_PRESENT) {
#endif
			/* Otherwise, start by releasing the existing entry. */
			pte_t *spte = spte_addr(cpu, *spgd, vaddr);
			release_pte(*spte);

			/*
			 * If they're setting this entry as dirty or accessed,
			 * we might as well put that entry they've given us in
			 * now.  This shaves 10% off a copy-on-write
			 * micro-benchmark.
			 */
			if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
				check_gpte(cpu, gpte);
				set_pte(spte,
					gpte_to_spte(cpu, gpte,
						pte_flags(gpte) & _PAGE_DIRTY));
			} else {
				/*
				 * Otherwise kill it and we can demand_page()
				 * it in later.
				 */
				set_pte(spte, __pte(0));
			}
#ifdef CONFIG_X86_PAE
		}
#endif
	}
}
/*H:410
 * Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings.  This speeds up context switch immensely.
 */
void guest_set_pte(struct lg_cpu *cpu,
		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
	/* We don't let you remap the Switcher; we need it to get back! */
	if (vaddr >= switcher_addr) {
		kill_guest(cpu, "attempt to set pte into Switcher pages");
		return;
	}

	/*
	 * Kernel mappings must be changed on all top levels.  Slow, but doesn't
	 * happen often.
	 */
	if (vaddr >= cpu->lg->kernel_address) {
		unsigned int i;
		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
			if (cpu->lg->pgdirs[i].pgdir)
				do_set_pte(cpu, i, vaddr, gpte);
	} else {
		/* Is this page table one we have a shadow for? */
		int pgdir = find_pgdir(cpu->lg, gpgdir);
		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
			/* If so, do the update. */
			do_set_pte(cpu, pgdir, vaddr, gpte);
	}
}
/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
	int pgdir;

	if (idx >= SWITCHER_PGD_INDEX)
		return;

	/* If they're talking about a page table we have a shadow for... */
	pgdir = find_pgdir(lg, gpgdir);
	if (pgdir < ARRAY_SIZE(lg->pgdirs))
		/* ... throw it away. */
		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
}
#ifdef CONFIG_X86_PAE
/* For setting a mid-level, we just throw everything away.  It's easy. */
void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
{
	guest_pagetable_clear_all(&lg->cpus[0]);
}
#endif
/*H:500
 * (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, we initialize a shadow page table which
 * we will populate on future faults.  The Guest doesn't have any actual
 * pagetables yet, so we set linear_pages to tell demand_page() to fake it
 * for the moment.
 */
int init_guest_pagetable(struct lguest *lg)
{
	struct lg_cpu *cpu = &lg->cpus[0];
	int allocated = 0;

	/* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
	cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
	if (!allocated)
		return -ENOMEM;

	/*
	 * We start with a linear mapping until the Guest initializes its
	 * own page tables.
	 */
	cpu->linear_pages = true;
	return 0;
}
/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
	/*
	 * We tell the Guest that it can't use the virtual addresses
	 * used by the Switcher: "~switcher_addr + 1" is a two's-complement
	 * trick for computing 4GB - switcher_addr.
	 */
	u32 top = ~switcher_addr + 1;

	/* We get the kernel address: above this is all kernel memory. */
	if (get_user(cpu->lg->kernel_address,
		     &cpu->lg->lguest_data->kernel_address)
		/*
		 * We tell the Guest that it can't use the top virtual
		 * addresses (used by the Switcher).
		 */
	    || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
		return;
	}

	/*
	 * In flush_user_mappings() we loop from 0 to
	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
	 * Switcher mappings, so check that now.
	 */
	if (cpu->lg->kernel_address >= switcher_addr)
		kill_guest(cpu, "bad kernel address %#lx",
			   cpu->lg->kernel_address);
}
/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

	/* Throw away all page table pages. */
	release_all_pagetables(lg);
	/* Now free the top levels: free_page() can handle 0 just fine. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}
/*H:480
 * (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the
 * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in now we know which
 * Guest is about to run on this CPU.
 */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
	pte_t regs_pte;

#ifdef CONFIG_X86_PAE
	pmd_t switcher_pmd;
	pmd_t *pmd_table;

	switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
			       PAGE_KERNEL_EXEC);

	/*
	 * Figure out where the pmd page is, by reading the PGD, and converting
	 * it to a virtual address.
	 */
	pmd_table = __va(pgd_pfn(cpu->lg->
			pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
								<< PAGE_SHIFT);
	/* Now write it into the shadow page table. */
	set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
#else
	pgd_t switcher_pgd;

	/*
	 * Make the last PGD entry for this Guest point to the Switcher's PTE
	 * page for this CPU (with appropriate flags).
	 */
	switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);

	cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

#endif
	/*
	 * We also change the Switcher PTE page.  When we're running the Guest,
	 * we want the Guest's "regs" page to appear where the first Switcher
	 * page for this CPU is.  This is an optimization: when the Switcher
	 * saves the Guest registers, it saves them into the first page of this
	 * CPU's "struct lguest_pages": if we make sure the Guest's register
	 * page is already mapped there, we don't have to copy them out
	 * again.
	 */
	regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL);
	set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], regs_pte);
}
/*:*/
static void free_switcher_pte_pages(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		free_page((long)switcher_pte_page(i));
}
/*H:520
 * Setting up the Switcher PTE page for a given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher and per-cpu pages.
 */
static __init void populate_switcher_pte_page(unsigned int cpu,
					      struct page *switcher_pages[])
{
	pte_t *pte = switcher_pte_page(cpu);
	int i;

	/* The first entry maps the Switcher code. */
	set_pte(&pte[0], mk_pte(switcher_pages[0],
				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));

	/* The only other thing we map is this CPU's pair of pages. */
	i = 1 + cpu*2;

	/* First page (Guest registers) is writable from the Guest */
	set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_pages[i]),
			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));

	/*
	 * The second page contains the "struct lguest_ro_state", and is
	 * read-only.
	 */
	set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_pages[i+1]),
			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}
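
/*
 * To make the layout concrete (the CPU number is illustrative): for cpu 2,
 * i = 1 + 2*2 = 5, so that CPU's Switcher PTE page maps the Switcher code
 * at entry 0, the writable Guest-registers page at entry 5, and the
 * read-only "struct lguest_ro_state" page at entry 6.  Every other entry
 * stays zero, so a Guest running on cpu 2 never sees another CPU's pages.
 */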
/*
 * We've made it through the page table code.  Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in juggling shadow page
 * tables in sync with the Guest's page tables is for one reason: for most
 * Guests this page table dance determines how bad performance will be.  This
 * is why Xen uses exotic direct Guest pagetable manipulation, and why both
 * Intel and AMD have implemented shadow page table support directly into
 * hardware.
 *
 * There is just one file remaining in the Host.
 */
/*H:510
 * At boot or module load time, init_pagetables() allocates and populates
 * the Switcher PTE page for each CPU.
 */
__init int init_pagetables(struct page **switcher_pages)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!switcher_pte_page(i)) {
			free_switcher_pte_pages();
			return -ENOMEM;
		}
		populate_switcher_pte_page(i, switcher_pages);
	}
	return 0;
}
/*:*/
/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
	free_switcher_pte_pages();
}