// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such
			 * a page is not CPU accessible and thus is mapped as
			 * a special swap entry; nonetheless it still counts
			 * as a valid regular mapping for the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}
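
/*
 * Illustrative sketch, not part of the original file: how a swap-style PTE
 * for un-addressable ZONE_DEVICE memory (as described in the comment in
 * map_pte() above) is recognised and turned back into a pfn, using only
 * helpers already used elsewhere in this file.  The function name is
 * hypothetical and exists purely as an example.
 */
static bool __maybe_unused pvmw_example_device_private_pfn(pte_t pte,
							   unsigned long *pfn)
{
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;			/* ordinary present/none PTE */
	entry = pte_to_swp_entry(pte);
	if (!is_device_private_entry(entry))
		return false;			/* real swap or migration entry */
	*pfn = device_private_entry_to_pfn(entry);
	return true;
}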
/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	if (pfn < page_to_pfn(pvmw->page))
		return false;

	/* THP can be referenced by any subpage */
	if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page))
		return false;

	return true;
}
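
/*
 * Illustrative sketch, not part of the original file: the containment test
 * at the end of check_pte() written as a stand-alone predicate.  A
 * (possibly huge) page is "hit" when the pfn falls anywhere in
 * [head_pfn, head_pfn + hpage_nr_pages(page)).  The function name is
 * hypothetical.
 */
static bool __maybe_unused pvmw_example_pfn_in_page(unsigned long pfn,
						    struct page *page)
{
	unsigned long head_pfn = page_to_pfn(page);

	return pfn >= head_pfn &&
	       pfn - head_pfn < hpage_nr_pages(page);
}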
/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
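
/*
 * Illustrative sketch, not part of the original file: the usual calling
 * pattern for page_vma_mapped_walk(), as described in its kernel-doc.  The
 * caller fills in page, vma, address and flags, then iterates; each
 * successful iteration returns with pvmw.pte (or pvmw.pmd for a PMD-mapped
 * THP) pointing at one mapping and pvmw.ptl held.  The function name and
 * the stop_early policy are hypothetical.
 */
static void __maybe_unused pvmw_example_walk_all(struct page *page,
						 struct vm_area_struct *vma,
						 unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = 0,
	};
	bool stop_early = false;	/* hypothetical caller policy */

	while (page_vma_mapped_walk(&pvmw)) {
		/* Inspect pvmw.pte or pvmw.pmd here, under pvmw.ptl. */
		if (stop_early) {
			/* Leaving the loop early: do the unlock/unmap ourselves. */
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}
}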
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
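
/*
 * Illustrative sketch, not part of the original file: page_mapped_in_vma()
 * is the simple yes/no front end to the walk above.  PVMW_SYNC makes
 * map_pte() take the PTE lock even for entries it would otherwise skip, so
 * the walk serialises against concurrent page-table updates.  The function
 * name below is hypothetical.
 */
static bool __maybe_unused pvmw_example_is_mapped(struct page *page,
						  struct vm_area_struct *vma)
{
	return page_mapped_in_vma(page, vma) == 1;
}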