// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__
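
/*
 * Usage sketch: build with CONFIG_DEBUG_VM_PGTABLE=y and the checks below
 * run once during boot via late_initcall(); any failed expectation shows
 * up as a WARN_ON() backtrace in the kernel log.
 */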
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the
 * semantics expectations that are being validated here. All future changes
 * in here or in the documentation need to be kept in sync.
 */
#define RANDOM_NZVALUE	GENMASK(7, 0)
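
/*
 * RANDOM_NZVALUE (0xff) is simply a guaranteed non-zero byte pattern: the
 * P4D/PGD basic tests below memset() an entry with it so that pxx_same()
 * is exercised against something other than an empty entry.
 */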

struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_alignment;
	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}
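
/*
 * For instance, the wrprotect/mkwrite pair above is checked in both
 * directions: pte_mkwrite(pte_wrprotect(pte)) must be writable while
 * pte_wrprotect(pte_mkwrite(pte)) must not be, validating each helper as
 * the exact inverse of its counterpart.
 */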

static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flush.
	 * This requires that set_pte_at() not be used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released, and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	if (WARN_ON(!args->ptep))
		return;

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte, args->vma);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}
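
/*
 * Unlike the basic tests, the advanced tests dereference args->ptep, so
 * the caller (debug_vm_pgtable() below) maps and locks the PTE table with
 * pte_offset_map_lock() before invoking them.
 */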

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));

	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released, and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd, args->vma);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pte entries */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}
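
/*
 * The deposit/withdraw pair above mirrors real THP usage: a preallocated
 * PTE page table is stashed with pgtable_trans_huge_deposit() when a huge
 * PMD is installed, and reclaimed with pgtable_trans_huge_withdraw() when
 * the mapping is torn down or split back to PTEs.
 */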

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released, and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	/*
	 * Some architectures have debug checks to make sure
	 * huge pud mappings are only found with devmap entries.
	 * For now test with only devmap entries.
	 */
	pud = pud_mkdevmap(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkdevmap(pud);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkdevmap(pud);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);

	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot) ||
	    args->fixed_alignment < PMD_SIZE)
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86's pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot) ||
	    args->fixed_alignment < PUD_SIZE)
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86's pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = READ_ONCE(*args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	WARN_ON(pud_none(pud));
	pud_clear(args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = READ_ONCE(*args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	WARN_ON(p4d_none(p4d));
	p4d_clear(args->p4dp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = READ_ONCE(*(args->pgdp));

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	WARN_ON(pgd_none(pgd));
	pgd_clear(args->pgdp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released, and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PTE clear\n");
	if (WARN_ON(!args->ptep))
		return;

	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	WARN_ON(pte_none(pte));
	flush_dcache_page(page);
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = READ_ONCE(*args->pmdp);

	pr_debug("Validating PMD clear\n");
	WARN_ON(pmd_none(pmd));
	pmd_clear(args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
	unsigned long max_swap_offset;
	swp_entry_t entry, entry2;
	pte_t pte;

	pr_debug("Validating PTE swap exclusive\n");

	/* See generic_max_swapfile_size(): probe the maximum offset */
	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));

	/* Create a swp entry with all possible bits set */
	entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset);

	pte = swp_entry_to_pte(entry);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	WARN_ON(pte_swp_soft_dirty(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
}
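
/*
 * The probe above works because swp_entry_to_pte() can only encode as many
 * offset bits as the architecture reserves in a swap PTE: round-tripping
 * swp_entry(0, ~0UL) through a pte silently truncates the offset, so the
 * value read back is the largest offset the pte format can hold.
 */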

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's use the allocated page explicitly for this
	 * purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects the given page
	 * to be locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}
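
/*
 * Both entry flavours carry the same offset (the pfn); only the entry type
 * distinguishes writable from readable. That is why the readable entry can
 * be rebuilt either from swp_offset() of the writable one or directly from
 * page_to_pfn(page), and both constructions are checked above.
 */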

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return positive after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * are no THPs allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_leaf(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}
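
/*
 * Example: with FIRST_USER_ADDRESS = 0 and, say, a 128TB TASK_SIZE on
 * x86_64, this draws a page-aligned address uniformly from the whole user
 * range. The address is never actually accessed; it only selects which
 * page table slots the tests operate on.
 */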

static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_page(page);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}

static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
	if (order > MAX_PAGE_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order <= MAX_PAGE_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}
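
/*
 * A PUD-sized THP (order HPAGE_PUD_SHIFT - PAGE_SHIFT, e.g. order 18 with
 * 4K pages on x86_64) exceeds MAX_PAGE_ORDER, hence the
 * alloc_contig_pages() path above; PMD-sized requests normally fit within
 * the buddy allocator fallback.
 */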

/*
 * Check if a physical memory range described by <pstart, pend> contains
 * an area that is of size psize, and aligned to psize.
 *
 * Don't use address 0, an all-zeroes physical address might mask bugs, and
 * it's not used on x86.
 */
static void __init phys_align_check(phys_addr_t pstart,
				    phys_addr_t pend, unsigned long psize,
				    phys_addr_t *physp, unsigned long *alignp)
{
	phys_addr_t aligned_start, aligned_end;

	if (pstart == 0)
		pstart = PAGE_SIZE;

	aligned_start = ALIGN(pstart, psize);
	aligned_end = aligned_start + psize;

	if (aligned_end > aligned_start && aligned_end <= pend) {
		*alignp = psize;
		*physp = aligned_start;
	}
}
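
/*
 * Worked example: for a memblock range [3MiB, 6MiB) and psize = 2MiB, the
 * aligned candidate is [4MiB, 6MiB), which fits, so *physp becomes 4MiB
 * and *alignp becomes 2MiB; a [3MiB, 5MiB) range would be rejected since
 * the candidate would end at 6MiB, past pend.
 */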

static void __init init_fixed_pfns(struct pgtable_debug_args *args)
{
	u64 idx;
	phys_addr_t phys, pstart, pend;

	/*
	 * Initialize the fixed pfns. To do this, try to find a
	 * valid physical range, preferably aligned to PUD_SIZE,
	 * but settling for aligned to PMD_SIZE as a fallback. If
	 * neither of those is found, use the physical address of
	 * the start_kernel symbol.
	 *
	 * The memory doesn't need to be allocated, it just needs to exist
	 * as usable memory. It won't be touched.
	 *
	 * The alignment is recorded, and can be checked to see if we
	 * can run the tests that require an actual valid physical
	 * address range on some architectures ({pmd,pud}_huge_tests
	 * on x86).
	 */
	phys = __pa_symbol(&start_kernel);
	args->fixed_alignment = PAGE_SIZE;

	for_each_mem_range(idx, &pstart, &pend) {
		/* First check for a PUD-aligned area */
		phys_align_check(pstart, pend, PUD_SIZE, &phys,
				 &args->fixed_alignment);

		/* If a PUD-aligned area is found, we're done */
		if (args->fixed_alignment == PUD_SIZE)
			break;

		/*
		 * If no PMD-aligned area has been found yet, check for one,
		 * but continue the loop to look for a PUD-aligned area.
		 */
		if (args->fixed_alignment < PMD_SIZE)
			phys_align_check(pstart, pend, PMD_SIZE, &phys,
					 &args->fixed_alignment);
	}

	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
}

static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
	 * will help create page table entries with PROT_NONE permission as
	 * required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr = get_random_vaddr();
	args->page_prot = vm_get_page_prot(VM_ACCESS_FLAGS);
	args->page_prot_none = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn = ULONG_MAX;
	args->pmd_pfn = ULONG_MAX;
	args->pte_pfn = ULONG_MAX;
	args->fixed_pgd_pfn = ULONG_MAX;
	args->fixed_p4d_pfn = ULONG_MAX;
	args->fixed_pud_pfn = ULONG_MAX;
	args->fixed_pmd_pfn = ULONG_MAX;
	args->fixed_pte_pfn = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
	WARN_ON(!args->start_ptep);

	init_fixed_pfns(args);

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_page(GFP_KERNEL);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	/*
	 * Iterate over each possible vm_flags to make sure that all
	 * the basic page table transformation validations just hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 *
	 * Protection based vm_flags combinations are always linear
	 * and increasing, i.e. starting from VM_NONE and going up to
	 * (VM_SHARED | READ | WRITE | EXEC).
	 */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)
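
	/*
	 * With four flag bits (shared/read/write/exec), idx walks all
	 * sixteen protection combinations, i.e. every entry that a
	 * vm_get_page_prot() implementation has to provide.
	 */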
	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_devmap_tests(&args);
	pmd_devmap_tests(&args);
	pud_devmap_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold
	 * the proper page table lock.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	if (args.ptep)
		pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);