/*
 * Lockless get_user_pages_fast for s390
 *
 * Copyright IBM Corp. 2010
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>
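
/*
 * The idea behind gup-fast: walk the page tables with interrupts
 * disabled, which on s390 keeps the tables from being freed underneath
 * us (see the comment in get_user_pages_fast() below), read each entry
 * exactly once, take a speculative reference on the target page and
 * re-check the entry afterwards. Any surprise (invalid entry, changed
 * entry, failed reference) makes the walkers return 0 so that the
 * caller can fall back to the slow, sleeping get_user_pages() path.
 */
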
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	pte_t *ptep, pte;
	struct page *page;

	/* A usable pte must be valid, not special and, for a write,
	 * not write-protected. */
	mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
		pte = *ptep;
		barrier();
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
		/* The pte may have changed after the speculative get:
		 * drop the reference and let the slow path decide. */
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
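
/*
 * _PAGE_SPECIAL is part of the mask above because special mappings have
 * no struct page to take a reference on; such ptes are left to the slow
 * path, which can consult the VMA to find out what is actually mapped.
 */
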
static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	struct page *head, *page, *tail;
	int refs;

	result = write ? 0 : _SEGMENT_ENTRY_RO;
	mask = result | _SEGMENT_ENTRY_INV;
	if ((pmd_val(pmd) & mask) != result)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}
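
/*
 * Note that a single page_cache_add_speculative() of "refs" on the head
 * page pins all base pages of the huge mapping at once; the tail pages
 * only get their mapcount raised (get_huge_page_tail()) so that a later
 * split of the compound page can hand out those references correctly.
 */
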
static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	pmdp = (pmd_t *) pudp;
#ifdef CONFIG_64BIT
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
#endif
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		/*
		 * The pmd_trans_splitting() check below explains why
		 * pmdp_splitting_flush() has to serialize with
		 * smp_call_function() against our disabled IRQs, to stop
		 * this gup-fast code from running while we set the
		 * splitting bit in the pmd. Returning zero will take
		 * the slow path that will call wait_split_huge_page()
		 * if the pmd is still in splitting state.
		 */
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

	pudp = (pud_t *) pgdp;
#ifdef CONFIG_64BIT
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) pgd_deref(pgd);
	pudp += pud_index(addr);
#endif
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
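
/*
 * s390 allocates page tables with only as many levels as the size of
 * the address space requires, so a pgd entry may really be a region or
 * segment table entry of a shallower tree. The _REGION_ENTRY_TYPE_MASK
 * checks in the two walkers above detect whether a level is actually
 * present: if it is, the entry is dereferenced to descend a real level;
 * if not, the entry pointer is simply reinterpreted for the next level.
 */
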
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if ((end < start) || (end > TASK_SIZE))
		return 0;

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
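
/*
 * Because __get_user_pages_fast() takes no locks and never sleeps, it
 * is usable from contexts where blocking is forbidden. It may return
 * fewer pages than requested; such callers must handle the shortfall
 * themselves, typically by deferring to a context where the sleeping
 * get_user_pages() is allowed.
 */
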
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if ((end < start) || (end > TASK_SIZE))
		return 0;
	/*
	 * local_irq_disable() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;
slow:
		local_irq_enable();

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values: a positive
		 * partial result from the fast path must not be thrown
		 * away if the slow path fails. */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
		return ret;
	}
}
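
/*
 * Example usage, as an illustrative sketch only (NPAGES, my_buf and
 * my_pages are made-up names): a driver that wants to pin a user
 * buffer before starting I/O on it could do
 *
 *	struct page *my_pages[NPAGES];
 *	int i, got;
 *
 *	got = get_user_pages_fast((unsigned long) my_buf, NPAGES,
 *				  1, my_pages);
 *	if (got < 0)
 *		return got;
 *	... perform the I/O on my_pages[0..got-1] ...
 *	for (i = 0; i < got; i++)
 *		put_page(my_pages[i]);
 *
 * Fewer than NPAGES pages may have been pinned, so the caller must
 * check the return value and release exactly that many references.
 */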