/*
 * User space memory access functions
 *
 * Copyright (C) 1999  Niibe Yutaka
 *
 * MIPS implementation version 1.15 by
 *	Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 */
11 #ifndef __ASM_SH_UACCESS_H
12 #define __ASM_SH_UACCESS_H
14 #include <linux/errno.h>
15 #include <linux/sched.h>
18 #define VERIFY_WRITE 1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */
/* Build an mm_segment_t compound literal holding the limit `s'. */
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

/* Kernel-mode limit: the entire 32-bit address space is accessible. */
#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
/* User-mode limit: only addresses below 0x80000000 are accessible. */
#define USER_DS MAKE_MM_SEG(0x80000000)

/* get_ds() always reports the kernel segment. */
#define get_ds() (KERNEL_DS)
/* Current task's address limit; set_fs() changes it (e.g. around
 * kernel-internal uses of the user-access routines). */
#define get_fs() (current->addr_limit)
#define set_fs(x) (current->addr_limit=(x))

/* Compare two mm_segment_t values by their raw limit. */
#define segment_eq(a,b) ((a).seg == (b).seg)

/* True when a single address lies below the current limit (no size). */
#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))
/*
 * Uhhuh, this needs 33-bit arithmetic.  We have a carry...
 *
 * sum := addr + size;  carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 */
/*
 * Range-check [addr, addr+size) against the current addr_limit using
 * 33-bit arithmetic: the carry out of the addition and the compare
 * result are both rotated into `flag'.  Evaluates to 0 when the range
 * is acceptable, non-zero otherwise.  (The closing `flag; })' of the
 * statement expression was lost from this copy and is restored here.)
 */
#define __range_ok(addr,size) ({ \
	unsigned long flag,sum; \
	__asm__("clrt; addc %3,%1; movt %0; cmp/hi %4,%1; rotcl %0" \
		:"=&r" (flag), "=r" (sum) \
		:"1" (addr), "r" (size), "r" (current->addr_limit.seg)); \
	flag; })
/* Non-zero when the user range may be accessed.  `type' (VERIFY_READ /
 * VERIFY_WRITE) is ignored on this architecture — the check is purely a
 * range check via __range_ok(). */
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
#define __access_ok(addr,size) (__range_ok(addr,size) == 0)
57 extern inline int verify_area(int type
, const void * addr
, unsigned long size
)
59 return access_ok(type
,addr
,size
) ? 0 : -EFAULT
;
/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As MIPS uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
/* Checked single-value transfer: expand to the *_check variants below,
 * which perform __access_ok() first.  Evaluate to 0 on success or a
 * negative error (-EFAULT) on failure. */
#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
/* Unchecked variants — caller must have validated the range with
 * access_ok() beforehand. */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  These macros can be optimized
 * for the case of just returning from the function in which xxx_ret
 * is used.
 */
/* NOTE: these macros contain a hidden `return' from the ENCLOSING
 * function when the transfer fails — callers must be written with
 * that control flow in mind. */
#define put_user_ret(x,ptr,ret) ({ \
if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ \
if (get_user(x,ptr)) return ret; })

/* Unchecked counterparts of the two macros above. */
#define __put_user_ret(x,ptr,ret) ({ \
if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ \
if (__get_user(x,ptr)) return ret; })
/* Dummy oversized type for "m" asm operands: casting a user address to
 * *__large_struct presumably tells GCC the asm may touch a wide region
 * at that address, not just a single word — confirm against the
 * original kernel source. */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
/*
 * Fetch `size' bytes (1/2/4) from user space WITHOUT an access check.
 * Stores the fetched value into x and evaluates to 0 on success or
 * -EFAULT on fault.  __get_user_asm() expects the local names
 * __gu_err / __gu_val / __gu_addr.  The declarations and the
 * `switch (size) {' header were lost from this copy and are restored.
 * The empty asms presumably just silence "uninitialized" warnings —
 * TODO confirm against the original source.
 */
#define __get_user_nocheck(x,ptr,size) ({ \
long __gu_err, __gu_addr; \
__typeof(*(ptr)) __gu_val; \
__asm__("":"=r" (__gu_val)); \
__gu_addr = (long) (ptr); \
__asm__("":"=r" (__gu_err)); \
switch (size) { \
case 1: __get_user_asm("b"); break; \
case 2: __get_user_asm("w"); break; \
case 4: __get_user_asm("l"); break; \
default: __get_user_unknown(); break; \
} x = (__typeof__(*(ptr))) __gu_val; __gu_err; })
/*
 * Checked get_user(): validates the range with __access_ok() before
 * fetching.  Stores the fetched value into x and evaluates to 0 on
 * success or -EFAULT on failure.
 *
 * Fix: the garbled original left __gu_err/__gu_val indeterminate when
 * the access check failed (only dummy asms "initialized" them); they
 * are now explicitly initialized so a failed check yields -EFAULT and
 * a zero value.
 */
#define __get_user_check(x,ptr,size) ({ \
long __gu_err = -EFAULT, __gu_addr; \
__typeof__(*(ptr)) __gu_val = (__typeof__(*(ptr))) 0; \
__gu_addr = (long) (ptr); \
if (__access_ok(__gu_addr,size)) { \
switch (size) { \
case 1: __get_user_asm("b"); break; \
case 2: __get_user_asm("w"); break; \
case 4: __get_user_asm("l"); break; \
default: __get_user_unknown(); break; \
} } x = (__typeof__(*(ptr))) __gu_val; __gu_err; })
/*
 * Emit one user-space load ("mov.b/w/l") with exception-table fixup:
 * on success __gu_err is presumably cleared and __gu_val holds the
 * datum; on fault the fixup path sets __gu_err to -EFAULT (the "i"
 * operand).
 *
 * NOTE(review): most of the asm body (labels, fixup code, the opening
 * "({" and the .previous directives) is missing from this copy of the
 * file — restore it from the original kernel source before building.
 */
#define __get_user_asm(insn) \
__asm__ __volatile__( \
"mov." insn " %2,%1\n\t" \
".section .fixup,\"ax\"\n" \
".section __ex_table,\"a\"\n\t" \
:"=&r" (__gu_err), "=&r" (__gu_val) \
:"m" (__m(__gu_addr)), "i" (-EFAULT)); })

/* Presumably never defined: referencing it makes a get_user() on an
 * unsupported size fail at link time. */
extern void __get_user_unknown(void);
/*
 * Store `size' bytes (1/2/4) of x to user space WITHOUT an access
 * check.  Evaluates to 0 on success or -EFAULT on fault.
 * __put_user_asm() expects the local names __pu_err / __pu_val /
 * __pu_addr.  The declarations, the `__pu_val = (x);' assignment, the
 * `switch (size) {' header and the closing `} __pu_err; })' were lost
 * from this copy and are restored here.
 */
#define __put_user_nocheck(x,ptr,size) ({ \
long __pu_err, __pu_addr; \
__typeof__(*(ptr)) __pu_val; \
__pu_val = (x); \
__pu_addr = (long) (ptr); \
__asm__("":"=r" (__pu_err)); \
switch (size) { \
case 1: __put_user_asm("b"); break; \
case 2: __put_user_asm("w"); break; \
case 4: __put_user_asm("l"); break; \
default: __put_user_unknown(); break; \
} __pu_err; })
/*
 * Checked put_user(): validates the range with __access_ok() before
 * storing.  Evaluates to 0 on success or -EFAULT on failure.
 *
 * Fix: __pu_err is now explicitly initialized to -EFAULT so a failed
 * access check reports an error instead of returning an indeterminate
 * value (the garbled original relied on a dummy asm).
 */
#define __put_user_check(x,ptr,size) ({ \
long __pu_err = -EFAULT, __pu_addr; \
__typeof__(*(ptr)) __pu_val; \
__pu_val = (x); \
__pu_addr = (long) (ptr); \
if (__access_ok(__pu_addr,size)) { \
switch (size) { \
case 1: __put_user_asm("b"); break; \
case 2: __put_user_asm("w"); break; \
case 4: __put_user_asm("l"); break; \
default: __put_user_unknown(); break; \
} } __pu_err; })
/*
 * Emit one user-space store ("mov.b/w/l") with exception-table fixup;
 * on fault the fixup path presumably sets __pu_err to -EFAULT (the
 * "i" operand).
 *
 * NOTE(review): most of the asm body (labels, fixup code, the opening
 * "({", the output-operand list and the .previous directives) is
 * missing from this copy — restore from the original kernel source.
 */
#define __put_user_asm(insn) \
__asm__ __volatile__( \
"mov." insn " %1,%2\n\t" \
".section .fixup,\"ax\"\n" \
".section __ex_table,\"a\"\n\t" \
:"r" (__pu_val), "m" (__m(__pu_addr)), "i" (-EFAULT)); })

/* Presumably never defined: referencing it makes a put_user() on an
 * unsupported size fail at link time. */
extern void __put_user_unknown(void);
/* Generic arbitrary sized copy. */
/* XXX: should be such that: 4byte and the rest. */
/*
 * Copy __n bytes from __from to __to with fault fixup.  Judging by the
 * callers below, it apparently returns the number of bytes NOT copied
 * (0 on full success) — TODO confirm against the original source.
 *
 * NOTE(review): large parts of this function are missing from this
 * copy: the opening brace, the declaration of `res', the asm
 * instruction strings, fixup/ex_table entries, `return res;' and the
 * closing brace.  Restore from the original kernel source.
 */
extern __inline__ __kernel_size_t
__copy_user(void *__to, const void *__from, __kernel_size_t __n)
unsigned long __dummy, _f, _t;
/* Inline copy loop with .fixup/.__ex_table recovery (body elided). */
__asm__ __volatile__(
".section .fixup,\"ax\"\n"
".section __ex_table,\"a\"\n"
: "=&r" (res), "=&z" (__dummy), "=&r" (_f), "=&r" (_t)
: "2" (__from), "3" (__to), "0" (__n), "i" (-EFAULT)
/*
 * Copy a block to user space with an __access_ok() pre-check.
 * Evaluates to 0 on success; on a failed range check nothing is
 * copied and the full size is returned.  (The closing
 * `__copy_res; })' was lost from this copy and is restored.)
 */
#define copy_to_user(to,from,n) ({ \
void *__copy_to = (void *) (to); \
__kernel_size_t __copy_size = (__kernel_size_t) (n); \
__kernel_size_t __copy_res; \
if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
} else __copy_res = __copy_size; \
__copy_res; })
/* Like copy_to_user() but `return retval' from the ENCLOSING function
 * on failure — hidden control flow, callers beware.  (The `return'
 * lines and macro closers were lost from this copy.) */
#define copy_to_user_ret(to,from,n,retval) ({ \
if (copy_to_user(to,from,n)) \
	return retval; \
})

/* Unchecked copy to user space: caller must have done access_ok(). */
#define __copy_to_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), (n))

/* Unchecked variant of copy_to_user_ret(). */
#define __copy_to_user_ret(to,from,n,retval) ({ \
if (__copy_to_user(to,from,n)) \
	return retval; \
})
/*
 * Copy a block from user space with an __access_ok() pre-check on the
 * source.  Evaluates to 0 on success; on a failed range check nothing
 * is copied and the full size is returned.  (The closing
 * `__copy_res; })' was lost from this copy and is restored.)
 */
#define copy_from_user(to,from,n) ({ \
void *__copy_to = (void *) (to); \
void *__copy_from = (void *) (from); \
__kernel_size_t __copy_size = (__kernel_size_t) (n); \
__kernel_size_t __copy_res; \
if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
} else __copy_res = __copy_size; \
__copy_res; })
/* Like copy_from_user() but `return retval' from the ENCLOSING
 * function on failure — hidden control flow, callers beware.  (The
 * `return' lines and macro closers were lost from this copy.) */
#define copy_from_user_ret(to,from,n,retval) ({ \
if (copy_from_user(to,from,n)) \
	return retval; \
})

/* Unchecked copy from user space: caller must have done access_ok(). */
#define __copy_from_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), (n))

/* Unchecked variant of copy_from_user_ret(). */
#define __copy_from_user_ret(to,from,n,retval) ({ \
if (__copy_from_user(to,from,n)) \
	return retval; \
})
/* XXX: Not sure it works well..
   should be such that: 4byte clear and the rest. */
/*
 * Zero `size' bytes at user address `addr' with fault fixup.  Judging
 * by clear_user() below, it apparently returns the number of bytes
 * NOT cleared — TODO confirm against the original source.
 *
 * NOTE(review): large parts of this function are missing from this
 * copy: the opening brace, the declaration of `res', the asm
 * instruction strings, fixup/ex_table entries, `return res;' and the
 * closing brace.  Restore from the original kernel source.
 */
extern __inline__ __kernel_size_t
__clear_user(void *addr, __kernel_size_t size)
unsigned long __a, __s;
/* Inline clear loop with .fixup/.__ex_table recovery (body elided). */
__asm__ __volatile__(
".section .fixup,\"ax\"\n"
".section __ex_table,\"a\"\n"
: "=&r" (res), "=&r" (__a), "=&r" (__s)
: "1" (addr), "2" (size), "r" (0), "0" (size), "i" (-EFAULT));
/*
 * Zero `n' bytes of user memory after an __access_ok() check.
 * Evaluates to 0 on success; when the range check fails nothing is
 * cleared and the full size is returned.  (The closing
 * `__cl_size; })' was lost from this copy and is restored.)
 */
#define clear_user(addr,n) ({ \
void * __cl_addr = (addr); \
unsigned long __cl_size = (n); \
if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
__cl_size = __clear_user(__cl_addr, __cl_size); \
__cl_size; })
/*
 * Copy a NUL-terminated string of at most __count bytes from user
 * address __src to kernel address __dest, with fault fixup.
 *
 * NOTE(review): large parts of this function are missing from this
 * copy: the opening brace, the declaration of `res', the asm
 * instruction strings, fixup/ex_table entries, the final input
 * operand after the trailing comma, `return res;' and the closing
 * brace.  Restore from the original kernel source.
 */
extern __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __src, int __count)
unsigned long __dummy, _d, _s;
/* Inline copy loop with .fixup/.__ex_table recovery (body elided). */
__asm__ __volatile__(
".section .fixup,\"ax\"\n"
".section __ex_table,\"a\"\n"
: "=&r" (res), "=&z" (__dummy), "=&r" (_s), "=&r" (_d)
: "2" (__src), "3" (__dest), "r" (__count), "0" (__count),
/*
 * Checked strncpy from user space.  Evaluates to -EFAULT when the
 * source range fails __access_ok(), otherwise to whatever
 * __strncpy_from_user() returns.  (The closing `} __sfu_res; })' was
 * lost from this copy and is restored.)
 */
#define strncpy_from_user(dest,src,count) ({ \
unsigned long __sfu_src = (unsigned long) (src); \
int __sfu_count = (int) (count); \
long __sfu_res = -EFAULT; \
if(__access_ok(__sfu_src, __sfu_count)) { \
__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
} __sfu_res; })
/*
 * Return the size of a string (including the ending 0!)
 */
/*
 * Unchecked strlen of a user-space string, with fault fixup; the
 * comment above says the result includes the terminating NUL.
 *
 * NOTE(review): large parts of this function are missing from this
 * copy: the opening brace, the declaration of `res', the asm
 * instruction strings, fixup/ex_table entries, `return res;' and the
 * closing brace.  Restore from the original kernel source.
 */
extern __inline__ long __strlen_user(const char *__s)
unsigned long __dummy;
/* Inline scan loop with .fixup/.__ex_table recovery (body elided). */
__asm__ __volatile__(
".section .fixup,\"ax\"\n"
".section __ex_table,\"a\"\n"
: "=&r" (res), "=&z" (__dummy)
: "0" (__s), "r" (__s), "i" (-EFAULT));
417 extern __inline__
long strlen_user(const char *s
)
419 if(!access_ok(VERIFY_READ
, s
, 0))
422 return __strlen_user(s
);
/*
 * One entry of the kernel exception table: the address of a faulting
 * instruction and the address of its fixup code.  Only the braces were
 * lost from this copy; the members are as originally written.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};
/* Returns 0 if exception not found and fixup.unit otherwise. */
/* `addr' is the faulting instruction address being looked up. */
extern unsigned long search_exception_table(unsigned long addr);
/* Returns the new pc */
/* NOTE(review): the body of this macro is missing from this copy of
 * the file (the #define ends in a dangling line continuation) —
 * restore it from the original kernel source before building. */
#define fixup_exception(map_reg, fixup_unit, pc) \
439 #endif /* __ASM_SH_UACCESS_H */