// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */
void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

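/*
 * Illustrative sketch (editor's addition, not in the original file):
 * the p?d_none_or_clear_bad() helpers in asm-generic/pgtable.h that
 * call into the functions above are shaped roughly like this:
 *
 *	static inline int pgd_none_or_clear_bad(pgd_t *pgd)
 *	{
 *		if (pgd_none(*pgd))
 *			return 1;
 *		if (unlikely(pgd_bad(*pgd))) {
 *			pgd_clear_bad(pgd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 *
 * so a walker that trips over a corrupt entry reports it once, resets
 * it to none, and simply skips that range.
 */
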
#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stub'ed out just as for p4d/pud
 * above. pmd folding is special and typically pmd_* macros refer to the
 * upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

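/*
 * Illustrative sketch (editor's addition): a fault handler, e.g. in
 * mm/memory.c, typically consumes the return value roughly like this,
 * touching the MMU cache only when the PTE really changed:
 *
 *	entry = pte_mkyoung(*ptep);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 */
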
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

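/*
 * Illustrative sketch (editor's addition): reclaim's reference
 * tracking in mm/rmap.c uses this helper roughly as follows, so a page
 * whose accessed bit was set gets another pass around the LRU:
 *
 *	if (ptep_clear_flush_young(vma, address, pte))
 *		referenced++;
 */
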
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

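/*
 * Illustrative sketch (editor's addition): callers that replace a live
 * mapping, such as the write-protect copy path in mm/memory.c, clear
 * and flush in one step so no CPU keeps using the old translation:
 *
 *	pte_t orig = ptep_clear_flush(vma, address, ptep);
 *	... build the replacement entry from the new page ...
 *	set_pte_at(mm, address, ptep, new_pte);
 *
 * (new_pte is a stand-in name.) The pte_accessible() check above lets
 * an architecture skip the TLB flush when the cleared entry can never
 * have been cached by the hardware walker.
 */
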
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

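/*
 * Illustrative sketch (editor's addition): the huge-pmd fault paths in
 * mm/huge_memory.c use this just like the pte variant, but with the
 * pmd-flavoured MMU cache hook and a huge-page-aligned address:
 *
 *	entry = pmd_mkyoung(orig_pmd);
 *	if (pmdp_set_access_flags(vma, haddr, pmdp, entry, dirty))
 *		update_mmu_cache_pmd(vma, haddr, pmdp);
 */
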
#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

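/*
 * Illustrative sketch (editor's addition): deposit and withdraw pair up
 * around a huge pmd's lifetime. When a THP is mapped, the preallocated
 * pte table is parked; when the huge pmd is later split (or zapped),
 * the same table is taken back, in FIFO order, to remap the range at
 * pte granularity:
 *
 *	(at THP map time, pmd lock held)
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *
 *	(at split time, same lock held)
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 *	pmd_populate(mm, &_pmd, pgtable);
 */
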
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mknotpresent(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

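/*
 * Illustrative sketch (editor's addition): the THP split path in
 * mm/huge_memory.c uses this so the hardware can neither keep the old
 * huge entry cached nor set dirty/accessed in it while the pte table
 * is being wired up, then transfers whatever bits were set in time:
 *
 *	old = pmdp_invalidate(vma, haddr, pmdp);
 *	if (pmd_dirty(old))
 *		SetPageDirty(page);
 */
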
#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * pmd and hugepage pte format are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif

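/*
 * Illustrative sketch (editor's addition): a khugepaged-style collapse
 * takes the pte table out of service, copies the data into a huge
 * page, then installs the huge pmd and deposits the emptied table for
 * a future split:
 *
 *	pmd = pmdp_collapse_flush(vma, address, pmdp);
 *	... copy pages into the new huge page, build entry _pmd ...
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	set_pmd_at(mm, address, pmdp, _pmd);
 */
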
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */