#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* split a transparent huge page before walking its ptes */
		split_huge_page_pmd(walk->mm, pmd);
		if (pmd_none_or_clear_bad(pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (!err && walk->pte_entry)
			err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}
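/*
 * Worked example (illustrative only, assuming 2 MB huge pages): for
 * addr = 0x2100000 and end = 0x10000000, the containing huge page
 * starts at addr & huge_page_mask(h) = 0x2000000, so the boundary is
 * 0x2000000 + 0x200000 = 0x2200000 and that value is returned; the
 * walk below therefore advances one huge page per iteration.
 */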
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			return err;
	} while (addr = next, addr != end);

	return 0;
}
#endif
/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer and the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * No locks are taken, but the bottom level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is returned.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	pgd = pgd_offset(walk->mm, addr);
	do {
		struct vm_area_struct *uninitialized_var(vma);

		next = pgd_addr_end(addr, end);

#ifdef CONFIG_HUGETLB_PAGE
		/*
		 * handle hugetlb vma individually because pagetable walk for
		 * the hugetlb page is dependent on the architecture and
		 * we can't handle it in the same manner as non-huge pages.
		 */
		vma = find_vma(walk->mm, addr);
		if (vma && is_vm_hugetlb_page(vma)) {
			if (vma->vm_end < next)
				next = vma->vm_end;
			/*
			 * Hugepage is very tightly coupled with vma, so
			 * walk through hugetlb entries within a given vma.
			 */
			err = walk_hugetlb_range(vma, addr, next, walk);
			if (err)
				break;
			pgd = pgd_offset(walk->mm, next);
			continue;
		}
#endif
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr != end);

	return err;
}
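
/*
 * Illustrative usage sketch (not part of the original file): count the
 * present ptes in a range through a ->pte_entry callback, assuming this
 * era's mm_walk API (->pte_entry, ->mm, ->private). The helper names
 * count_pte_range() and count_present_ptes() are hypothetical.
 */
static int count_pte_range(pte_t *pte, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* non-zero would abort the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk count_walk = {
		.pte_entry	= count_pte_range,
		.mm		= mm,
		.private	= &count,
	};

	/* callers are expected to hold mmap_sem at least for read */
	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &count_walk);
	up_read(&mm->mmap_sem);
	return count;
}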