/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
static int __handle_fault(struct mm_struct *mm, unsigned long address,
			  int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	if (in_atomic())
		return ret;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

survive:
	switch (handle_mm_fault(mm, vma, address, write_access)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto out_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}
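
/*
 * Copy "n" bytes between a user address and a kernel buffer by walking
 * the page tables by hand. Runs with mm->page_table_lock held; when a
 * translation is missing, or lacks write permission on a user write,
 * the lock is dropped, the fault is resolved via __handle_fault() and
 * the walk is retried. Returns the number of bytes NOT copied.
 */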
static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
			     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			goto out;

		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}
/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static unsigned long __dat_user_addr(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long pfn, ret;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int rc;

	ret = 0;
retry:
	pgd = pgd_offset(mm, uaddr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto fault;

	pmd = pmd_offset(pgd, uaddr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto fault;

	pte = pte_offset_map(pmd, uaddr);
	if (!pte || !pte_present(*pte))
		goto fault;

	pfn = pte_pfn(*pte);
	if (!pfn_valid(pfn))
		goto out;

	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
out:
	return ret;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(mm, uaddr, 0);
	spin_lock(&mm->page_table_lock);
	if (rc)
		goto out;
	goto retry;
}
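
/*
 * Illustrative caller pattern for __dat_user_addr() (a sketch, not code
 * that exists elsewhere in this file): hold the page_table_lock across
 * the translation and pin the page before dropping the lock, e.g.
 *
 *	spin_lock(&current->mm->page_table_lock);
 *	kaddr = __dat_user_addr(uaddr);
 *	if (kaddr)
 *		get_page(virt_to_page(kaddr));
 *	spin_unlock(&current->mm->page_table_lock);
 *
 * The futex helpers below follow exactly this pattern.
 */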
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
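
/*
 * Clear user memory page by page by "copying" from empty_zero_page,
 * reusing the write path of __user_copy_pt().
 */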
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				     &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}
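
/*
 * String length of a user string, again via a manual page table walk.
 * Note the s390 convention: the returned length includes the
 * terminating '\0', and a return value of 0 indicates a fault.
 */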
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	char *addr;
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, len;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn)) {
			done = -1;
			goto out;
		}

		offset = uaddr & (PAGE_SIZE - 1);
		addr = (char *)(pfn << PAGE_SHIFT) + offset;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen(addr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
out:
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, 0)) {
		return 0;
	}
	goto retry;
}
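
/*
 * strncpy by first determining the length with strnlen_user_pt() and
 * then copying in one go; returns the length of the copied string
 * without the terminating '\0', or -EFAULT.
 */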
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}
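
/*
 * Copy between two user addresses. Both translations have to be valid
 * at the same time, so each chunk is limited by whichever of the two
 * pages ends first (PAGE_SIZE - offset_max).
 */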
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
		      uaddr, done, size;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	pgd_t *pgd_from, *pgd_to;
	pmd_t *pmd_from, *pmd_to;
	pte_t *pte_from, *pte_to;
	int write_user;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd_from = pgd_offset(mm, uaddr_from);
		if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}
		pgd_to = pgd_offset(mm, uaddr_to);
		if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pmd_from = pmd_offset(pgd_from, uaddr_from);
		if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}
		pmd_to = pmd_offset(pgd_to, uaddr_to);
		if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pte_from = pte_offset_map(pmd_from, uaddr_from);
		if (!pte_from || !pte_present(*pte_from)) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}
		pte_to = pte_offset_map(pmd_to, uaddr_to);
		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pfn_from = pte_pfn(*pte_from);
		if (!pfn_valid(pfn_from))
			goto out;
		pfn_to = pte_pfn(*pte_to);
		if (!pfn_valid(pfn_to))
			goto out;

		offset_from = uaddr_from & (PAGE_SIZE - 1);
		offset_to = uaddr_to & (PAGE_SIZE - 1);
		offset_max = max(offset_from, offset_to);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}
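
/*
 * Futex operations, applied to the kernel mapping of the user page:
 * load the old value, compute the new value with "insn", then try to
 * store it back with compare-and-swap, looping until the swap succeeds.
 */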
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	put_page(virt_to_page(uaddr));
	*old = oldval;
	return ret;
}
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
	int ret;

	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	asm volatile("   cs   %1,%4,0(%5)\n"
		     "0: lr   %0,%1\n"
		     "1:\n"
		     EX_TABLE(0b,1b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	put_page(virt_to_page(uaddr));
	return ret;
}
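
/*
 * Page table walk based variant of the s390 uaccess operations.
 */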
struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};
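
/*
 * How this table is consumed (a sketch, assuming the usual s390 setup
 * code; the selection logic lives outside this file): architecture
 * setup installs one of the uaccess_ops variants into the global
 * "uaccess" structure, and the asm/uaccess.h wrappers dispatch through
 * it, e.g. copy_from_user() ends up in uaccess.copy_from_user().
 * uaccess_pt is the fallback for system layouts where the hardware
 * assisted variants cannot be used.
 */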