/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return NULL;

	return pte_offset_map(pmd, addr);
}

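/*
 * Resolve a fault on a user address the way the hardware fault handler
 * would: find the VMA, check the access rights, and let handle_mm_fault()
 * bring the page in. Returns 0 on success and -EFAULT if the address
 * cannot be made accessible.
 */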
static int __handle_fault(struct mm_struct *mm, unsigned long address,
			  int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;
	int fault;

	if (in_atomic())
		return ret;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

survive:
	fault = handle_mm_fault(mm, vma, address,
				write_access ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto out_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}

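/*
 * Copy "n" bytes between a kernel buffer and user space by walking the
 * user page tables by hand. Like the hardware-assisted variants, this
 * returns the number of bytes that could NOT be transferred.
 */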
static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
			     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

		pfn = pte_pfn(*pte);

		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static unsigned long __dat_user_addr(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long pfn, ret;
	pte_t *pte;
	int rc;

	ret = 0;
retry:
	pte = follow_table(mm, uaddr);
	if (!pte || !pte_present(*pte))
		goto fault;

	pfn = pte_pfn(*pte);
	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
out:
	return ret;
fault:
	/* the lock is dropped and re-taken around the fault fixup */
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(mm, uaddr, 0);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	goto out;
}

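/*
 * With the kernel address space active (KERNEL_DS) a plain memcpy()
 * suffices for both copy directions; otherwise fall back to the page
 * table walk. Any tail that could not be copied from user space is
 * zero-filled, matching copy_from_user() semantics.
 */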
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

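/*
 * Clear user memory by copying from empty_zero_page one page at a time,
 * reusing the write path of __user_copy_pt() including its fault handling.
 */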
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				     &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}

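/*
 * strnlen_user_pt() returns the string length *including* the
 * terminating '\0' on success, and 0 if the address could not be
 * resolved; strncpy_from_user_pt() below relies on the 0 case to
 * report -EFAULT.
 */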
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	char *addr;
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, len;
	pte_t *pte;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if (!pte || !pte_present(*pte))
			goto fault;

		pfn = pte_pfn(*pte);
		offset = uaddr & (PAGE_SIZE - 1);
		addr = (char *)(pfn << PAGE_SHIFT) + offset;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen(addr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, 0)) {
		return 0;
	}
	goto retry;
}

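/*
 * Copy a '\0'-terminated string from user space. Returns the string
 * length excluding the terminator when one was found within "count"
 * bytes, the number of bytes copied otherwise, and -EFAULT on fault.
 */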
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}

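/*
 * Copy between two user addresses. Both the source and the destination
 * PTE must be resolved on every page, and the chunk size is limited by
 * whichever page boundary comes first.
 */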
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
		      uaddr, done, size;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	pte_t *pte_from, *pte_to;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __force *) to, (void __force *) from, n);
		return 0;
	}
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte_from = follow_table(mm, uaddr_from);
		if (!pte_from || !pte_present(*pte_from)) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}

		pte_to = follow_table(mm, uaddr_to);
		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pfn_from = pte_pfn(*pte_from);
		pfn_to = pte_pfn(*pte_to);
		offset_from = uaddr_from & (PAGE_SIZE - 1);
		offset_to = uaddr_to & (PAGE_SIZE - 1);
		offset_max = max(offset_from, offset_to);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

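/*
 * The macro below expands to an interlocked update of a futex word:
 * load the old value, apply "insn" to compute the new value, and retry
 * the compare-and-swap until it succeeds. The user space accesses are
 * covered by extable entries, so a fault leaves -EFAULT in "ret".
 */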
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc");

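/*
 * In C terms, each __futex_atomic_op() expansion behaves roughly like
 * the following sketch (illustration only, not compiled code):
 *
 *	do {
 *		oldval = *uaddr;
 *		newval = <apply op to oldval and oparg>;
 *	} while (compare-and-swap of *uaddr from oldval to newval fails);
 *	ret = 0;	// or -EFAULT if any of the accesses faulted
 */
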
static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}

int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	/* pin the page so it stays valid after the lock is dropped */
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}

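/*
 * Single compare-and-swap on the futex word: "cs" performs the exchange
 * and, on mismatch, loads the current value, which is then copied to
 * "ret" so the caller always sees the old value or -EFAULT.
 */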
static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
	int ret;

	asm volatile("0: cs   %1,%4,0(%5)\n"
		     "1: lr   %0,%1\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory");
	return ret;
}

int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	/* pin the page so it stays valid after the lock is dropped */
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}

struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};
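
/*
 * uaccess_pt is one of several interchangeable uaccess_ops vectors; the
 * generic user access helpers dispatch through a global "uaccess"
 * structure that arch setup code points at the implementation fitting
 * the machine. A rough sketch of that dispatch (simplified; the exact
 * wrappers and selection logic live in asm/uaccess.h and the arch setup
 * code of this era):
 *
 *	extern struct uaccess_ops uaccess;
 *
 *	static inline unsigned long
 *	__copy_from_user(void *to, const void __user *from, unsigned long n)
 *	{
 *		return uaccess.copy_from_user(n, from, to);
 *	}
 */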