/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $
 *
 * User space memory access functions
 *
 * Copyright (C) 1999, 2002  Niibe Yutaka
 * Copyright (C) 2003  Paul Mundt
 *
 * Based on:
 *     MIPS implementation version 1.15 by
 *     Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 */
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>
/*
 * NOTE: Macros/functions in this file depend on the thread_info.h
 * implementation. In particular, the layout is assumed to satisfy:
 *
 *   TI_FLAGS == 8
 *   TIF_USERSPACE == 31
 *   USER_ADDR_LIMIT == 0x80000000
 */
#define VERIFY_READ	0
#define VERIFY_WRITE	1
typedef struct {
	unsigned int is_user_space;
} mm_segment_t;
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
#define segment_eq(a,b)	((a).is_user_space == (b).is_user_space)

#define USER_ADDR_LIMIT	0x80000000

#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)
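/*
 * Classic usage sketch (hypothetical caller, not part of this header):
 * temporarily switch to KERNEL_DS so that a routine expecting a __user
 * pointer can safely be handed a kernel buffer, then restore the old
 * segment:
 *
 *	mm_segment_t oldfs = get_fs();
 *	set_fs(KERNEL_DS);
 *	... perform the user-style access on a kernel pointer ...
 *	set_fs(oldfs);
 */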
#if !defined(CONFIG_MMU)
static inline mm_segment_t get_fs(void)
{
	return USER_DS;
}

static inline void set_fs(mm_segment_t s)
{
	/* Nothing to do: without an MMU there is no segment state to switch. */
}
/*
 * __access_ok: Check if address with size is OK or not.
 *
 * If we don't have an MMU (or if it's disabled), the only thing we really
 * have to look out for is whether the address resides somewhere outside
 * the available RAM.
 *
 * TODO: This check could probably also stand to be restricted somewhat
 * more, though it still does the Right Thing(tm) for the time being.
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	extern unsigned long memory_start, memory_end;

	return ((addr >= memory_start) && ((addr + size) < memory_end));
}
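/*
 * For example (values illustrative only; memory_start/memory_end are set
 * up at boot): with memory_start == 0x0c000000 and memory_end ==
 * 0x0d000000, a 4-byte access at 0x0c001000 is accepted, while one at
 * 0x0d000000, or any range running past memory_end, is rejected.
 */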
#else /* CONFIG_MMU */
static inline mm_segment_t get_fs(void)
{
	return MAKE_MM_SEG(test_thread_flag(TIF_USERSPACE));
}
static inline void set_fs(mm_segment_t s)
{
	unsigned long ti, flag;
	__asm__ __volatile__(
		"stc	r7_bank, %0\n\t"	/* ti = current thread_info */
		"mov.l	@(8,%0), %1\n\t"	/* flag = ti->flags (TI_FLAGS == 8) */
		"shal	%1\n\t"			/* shift out bit 31 (TIF_USERSPACE) */
		"cmp/pl	%2\n\t"			/* T = (s.is_user_space > 0) */
		"rotcr	%1\n\t"			/* rotate T back in as bit 31 */
		"mov.l	%1, @(8,%0)"		/* store the updated flags */
		: "=&r" (ti), "=&r" (flag)
		: "r" (s.is_user_space)
		: "t");
	/* Equivalent C, for reference:
	 *	if (s.is_user_space)
	 *		set_thread_flag(TIF_USERSPACE);
	 *	else
	 *		clear_thread_flag(TIF_USERSPACE);
	 */
}
/*
 * __access_ok: Check if address with size is OK or not.
 *
 * We do three checks:
 * (1) is it user space?
 * (2) addr + size --> carry?
 * (3) addr + size >= 0x80000000  (USER_ADDR_LIMIT)
 *
 * (1) (2) (3) | RESULT
 *  0   0   0  |  ok
 *  0   0   1  |  ok
 *  0   1   0  |  bad
 *  0   1   1  |  bad
 *  1   0   0  |  ok
 *  1   0   1  |  bad
 *  1   1   0  |  bad
 *  1   1   1  |  bad
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	unsigned long flag, tmp;

	__asm__("stc	r7_bank, %0\n\t"	/* flag = current thread_info */
		"mov.l	@(8,%0), %0\n\t"	/* flag = ti->flags */
		"clrt\n\t"
		"addc	%2, %1\n\t"		/* tmp = addr + size, T = carry */
		"and	%1, %0\n\t"		/* bit 31: user space && sum >= limit */
		"rotcl	%0\n\t"			/* pull the carry bit in as well */
		"rotcl	%0\n\t"
		"and	#3, %0"			/* keep only the two failure bits */
		: "=&z" (flag), "=r" (tmp)
		: "r" (addr), "1" (size)
		: "t");

	return flag == 0;
}
#endif /* CONFIG_MMU */
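/*
 * Rough C equivalent of the MMU check above (illustrative sketch only;
 * the asm reads the TIF_USERSPACE bit straight out of thread_info):
 *
 *	unsigned long sum = addr + size;
 *	int carry = sum < addr;
 *	int above_limit = (sum & USER_ADDR_LIMIT) != 0;
 *	ok = !carry && !(test_thread_flag(TIF_USERSPACE) && above_limit);
 */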
static inline int access_ok(int type, const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;
	return __access_ok(addr, size);
}
static inline int verify_area(int type, const void __user *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}
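/*
 * Usage sketch (hypothetical caller, not part of this header): note the
 * differing conventions -- access_ok() returns nonzero when the range is
 * usable, while the older verify_area() returns 0 on success:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *
 *	int err = verify_area(VERIFY_WRITE, ubuf, len);
 *	if (err)
 *		return err;
 */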
/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)		__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr)		__get_user_check((x),(ptr),sizeof(*(ptr)))
/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
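/*
 * Typical pattern (illustrative, hypothetical names): validate the whole
 * area once with access_ok(), then use the unchecked variants for the
 * individual accesses:
 *
 *	static int sum_words(const int __user *p, int n, int *sum)
 *	{
 *		int i, v, s = 0;
 *
 *		if (!access_ok(VERIFY_READ, p, n * sizeof(int)))
 *			return -EFAULT;
 *		for (i = 0; i < n; i++) {
 *			if (__get_user(v, p + i))
 *				return -EFAULT;
 *			s += v;
 *		}
 *		*sum = s;
 *		return 0;
 *	}
 */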
/*
 * __m() wraps an address in a struct large enough that gcc treats the
 * "m" constraint as covering the whole accessed region rather than a
 * single word.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
#define __get_user_size(x,ptr,size,retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		__get_user_asm(x, ptr, retval, "b");	\
		break;					\
	case 2:						\
		__get_user_asm(x, ptr, retval, "w");	\
		break;					\
	case 4:						\
		__get_user_asm(x, ptr, retval, "l");	\
		break;					\
	default:					\
		__get_user_unknown();			\
		break;					\
	}						\
} while (0)
#define __get_user_nocheck(x,ptr,size)			\
({							\
	long __gu_err, __gu_val;			\
	__get_user_size(__gu_val, (ptr), (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;		\
	__gu_err;					\
})
#define __get_user_check(x,ptr,size)			\
({							\
	long __gu_err, __gu_val;			\
	switch (size) {					\
	case 1:						\
		__get_user_1(__gu_val, (ptr), __gu_err); \
		break;					\
	case 2:						\
		__get_user_2(__gu_val, (ptr), __gu_err); \
		break;					\
	case 4:						\
		__get_user_4(__gu_val, (ptr), __gu_err); \
		break;					\
	default:					\
		__get_user_unknown();			\
		break;					\
	}						\
							\
	(x) = (__typeof__(*(ptr)))__gu_val;		\
	__gu_err;					\
})
#define __get_user_1(x,addr,err) ({		\
__asm__("stc	r7_bank, %1\n\t"		\
	"mov.l	@(8,%1), %1\n\t"		\
	"and	%2, %1\n\t"			\
	"cmp/pz	%1\n\t"				\
	"bt/s	1f\n\t"				\
	" mov	#0, %0\n\t"			\
	"0:\n"					\
	"mov	#-14, %0\n\t"			\
	"bra	2f\n\t"				\
	" mov	#0, %1\n"			\
	"1:\n\t"				\
	"mov.b	@%2, %1\n\t"			\
	"extu.b	%1, %1\n"			\
	"2:\n"					\
	".section	__ex_table,\"a\"\n\t"	\
	".long	1b, 0b\n\t"			\
	".previous"				\
	: "=&r" (err), "=&r" (x)		\
	: "r" (addr)				\
	: "t");					\
})
#define __get_user_2(x,addr,err) ({		\
__asm__("stc	r7_bank, %1\n\t"		\
	"mov.l	@(8,%1), %1\n\t"		\
	"and	%2, %1\n\t"			\
	"cmp/pz	%1\n\t"				\
	"bt/s	1f\n\t"				\
	" mov	#0, %0\n\t"			\
	"0:\n"					\
	"mov	#-14, %0\n\t"			\
	"bra	2f\n\t"				\
	" mov	#0, %1\n"			\
	"1:\n\t"				\
	"mov.w	@%2, %1\n\t"			\
	"extu.w	%1, %1\n"			\
	"2:\n"					\
	".section	__ex_table,\"a\"\n\t"	\
	".long	1b, 0b\n\t"			\
	".previous"				\
	: "=&r" (err), "=&r" (x)		\
	: "r" (addr)				\
	: "t");					\
})
#define __get_user_4(x,addr,err) ({		\
__asm__("stc	r7_bank, %1\n\t"		\
	"mov.l	@(8,%1), %1\n\t"		\
	"and	%2, %1\n\t"			\
	"cmp/pz	%1\n\t"				\
	"bt/s	1f\n\t"				\
	" mov	#0, %0\n\t"			\
	"0:\n"					\
	"mov	#-14, %0\n\t"			\
	"bra	2f\n\t"				\
	" mov	#0, %1\n"			\
	"1:\n\t"				\
	"mov.l	@%2, %1\n"			\
	"2:\n"					\
	".section	__ex_table,\"a\"\n\t"	\
	".long	1b, 0b\n\t"			\
	".previous"				\
	: "=&r" (err), "=&r" (x)		\
	: "r" (addr)				\
	: "t");					\
})
#define __get_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%2, %1\n\t" \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov	#0, %1\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (err), "=&r" (x) \
	:"m" (__m(addr)), "i" (-EFAULT)); })
extern void __get_user_unknown(void);
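/*
 * Note: __get_user_unknown() is declared but deliberately never defined.
 * If get_user() is applied to an object whose size is not 1, 2 or 4
 * bytes, the call survives to link time and the build fails with an
 * unresolved reference instead of silently misbehaving at run time.
 * For instance (hypothetical):
 *
 *	struct { char c[3]; } odd;
 *	get_user(odd, uptr);	<-- unresolved __get_user_unknown at link
 */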
#define __put_user_size(x,ptr,size,retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		__put_user_asm(x, ptr, retval, "b");	\
		break;					\
	case 2:						\
		__put_user_asm(x, ptr, retval, "w");	\
		break;					\
	case 4:						\
		__put_user_asm(x, ptr, retval, "l");	\
		break;					\
	case 8:						\
		__put_user_u64(x, ptr, retval);		\
		break;					\
	default:					\
		__put_user_unknown();			\
		break;					\
	}						\
} while (0)
#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})
#define __put_user_check(x,ptr,size)			\
({							\
	long __pu_err = -EFAULT;			\
	__typeof__(*(ptr)) *__pu_addr = (ptr);		\
							\
	if (__access_ok((unsigned long)__pu_addr,size))	\
		__put_user_size((x),__pu_addr,(size),__pu_err); \
	__pu_err;					\
})
#define __put_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%1, %2\n\t" \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (err) \
	:"r" (x), "m" (__m(addr)), "i" (-EFAULT) \
	:"memory"); })
#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%R1, %2\n\t"	/* low word first on little endian */ \
	"mov.l	%S1, %T2\n\t"	/* then the high word */ \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#else
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%S1, %2\n\t"	/* high word first on big endian */ \
	"mov.l	%R1, %T2\n\t"	/* then the low word */ \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#endif
extern void __put_user_unknown(void);
/* Generic arbitrary sized copy. */
/* Return the number of bytes NOT copied */
extern __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
#define copy_to_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else \
		__copy_res = __copy_size; \
	__copy_res; })
#define __copy_to_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)
#define __copy_to_user_inatomic		__copy_to_user
#define __copy_from_user_inatomic	__copy_from_user
#define copy_from_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	void *__copy_from = (void *) (from); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else \
		__copy_res = __copy_size; \
	__copy_res; })
#define __copy_from_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)
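/*
 * Usage sketch (hypothetical read()-style handler): the copy routines
 * return the number of bytes that could NOT be copied, so any nonzero
 * result is normally mapped to -EFAULT:
 *
 *	static ssize_t example_read(char __user *buf, size_t count)
 *	{
 *		char kbuf[64];
 *		size_t n = count < sizeof(kbuf) ? count : sizeof(kbuf);
 *
 *		... fill kbuf with n bytes ...
 *		if (copy_to_user(buf, kbuf, n))
 *			return -EFAULT;
 *		return n;
 *	}
 */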
/*
 * Clear the area and return the number of bytes NOT cleared
 * (on success this is 0).
 */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
#define clear_user(addr,n) ({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })
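/*
 * Usage sketch (hypothetical): zero the tail of a user buffer, e.g. when
 * a read produced fewer bytes than the caller asked for:
 *
 *	if (len < count && clear_user(buf + len, count - len))
 *		return -EFAULT;
 */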
static __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
{
	long res;
	unsigned long __dummy, _d, _s;

	__asm__ __volatile__(
		"9:\n"
		"mov.b	@%2+, %1\n\t"		/* fetch the next source byte */
		"cmp/eq	#0, %1\n\t"		/* NUL terminator? */
		"bt/s	2f\n"
		"1:\n"
		"mov.b	%1, @%3\n\t"		/* store it (delay slot) */
		"dt	%7\n\t"			/* decrement count, test for 0 */
		"bf/s	9b\n\t"
		" add	#1, %3\n\t"
		"2:\n\t"
		"sub	%7, %0\n"		/* res = count - remaining */
		"3:\n"
		".section .fixup,\"ax\"\n"
		"4:\n\t"
		"mov.l	5f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%8, %0\n\t"		/* res = -EFAULT on a fault */
		".balign	4\n"
		"5:	.long	3b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign	4\n"
		"	.long	9b,4b\n"
		".previous"
		: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d)
		: "0" (__count), "2" (__src), "3" (__dest), "r" (__count),
		  "i" (-EFAULT)
		: "memory", "t");

	return res;
}
#define strncpy_from_user(dest,src,count) ({ \
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count); \
	long __sfu_res = -EFAULT; \
	if (__access_ok(__sfu_src, __sfu_count)) { \
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	} \
	__sfu_res; })
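/*
 * Usage sketch (hypothetical): copy a user-supplied name into a kernel
 * buffer; the result is the string length on success (or the full count
 * if the source was not terminated within it), -EFAULT on a fault:
 *
 *	char kname[64];
 *	long len = strncpy_from_user(kname, uname, sizeof(kname) - 1);
 *	if (len < 0)
 *		return len;
 *	kname[len] = '\0';
 */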
/*
 * Return the size of a string (including the ending 0!)
 */
static __inline__ long __strnlen_user(const char __user *__s, long __n)
{
	unsigned long res;
	unsigned long __dummy;

	__asm__ __volatile__(
		"9:\n"
		"cmp/eq	%4, %0\n\t"		/* scanned __n bytes already? */
		"bt	2f\n"
		"1:\t"
		"mov.b	@(%0,%3), %1\n\t"	/* load s[res] */
		"tst	%1, %1\n\t"		/* NUL terminator? */
		"bf/s	9b\n\t"
		" add	#1, %0\n"		/* the count includes the NUL */
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l	4f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%5, %0\n"
		".balign	4\n"
		"4:	.long	2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign	4\n"
		"	.long	1b,3b\n"
		".previous"
		: "=z" (res), "=&r" (__dummy)
		: "0" (0), "r" (__s), "r" (__n), "i" (-EFAULT)
		: "t");
	return res;
}
static __inline__ long strnlen_user(const char __user *s, long n)
{
	if (!access_ok(VERIFY_READ, s, n))
		return 0;
	else
		return __strnlen_user(s, n);
}
static __inline__ long strlen_user(const char __user *s)
{
	if (!access_ok(VERIFY_READ, s, 0))
		return 0;
	else
		return __strnlen_user(s, ~0UL >> 1);
}
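/*
 * Usage sketch (hypothetical): on success both helpers return the string
 * size including the terminating NUL; a non-positive result, or one
 * larger than the limit, indicates a bad or unterminated string:
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *	if (len <= 0 || len > PATH_MAX)
 *		return -EFAULT;
 */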
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};
extern int fixup_exception(struct pt_regs *regs);
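/*
 * Sketch of how the fault handler consumes these entries (simplified,
 * illustrative only): when a fault is taken in kernel mode, the faulting
 * PC is looked up among the recorded insn addresses and, on a match,
 * execution resumes at the associated fixup address:
 *
 *	const struct exception_table_entry *e;
 *
 *	e = search_exception_tables(regs->pc);
 *	if (e) {
 *		regs->pc = e->fixup;
 *		return 1;
 *	}
 *	return 0;
 */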
#endif /* __ASM_SH_UACCESS_H */