2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
10 #define _ASM_UACCESS_H
12 #include <linux/errno.h>
13 #include <linux/thread_info.h>
/* Two-level stringification helper.  __STR() is not visible in this view
 * of the file -- presumably the '#x' step.  NOTE(review): confirm. */
15 #define STR(x) __STR(x)
19 * The fs value determines whether argument validity checking should be
20 * performed or not. If get_fs() == USER_DS, checking is performed, with
21 * get_fs() == KERNEL_DS, checking is bypassed.
23 * For historical reasons, these macros are grossly misnamed.
/* Segment values used as address-check masks (see __access_mask below):
 * KERNEL_DS is 0, so the masked check always passes; USER_DS is all-ones,
 * so any high bit in addr/size/addr+size fails the check. */
25 #define KERNEL_DS ((mm_segment_t) { (unsigned long) 0L })
26 #define USER_DS ((mm_segment_t) { (unsigned long) -1L })
/* NOTE(review): the matching VERIFY_READ definition (presumably 0) is
 * missing from this view of the file -- confirm against the original. */
29 #define VERIFY_WRITE 1
/* Per-thread address limit accessors; the limit lives in thread_info. */
31 #define get_ds() (KERNEL_DS)
32 #define get_fs() (current_thread_info()->addr_limit)
33 #define set_fs(x) (current_thread_info()->addr_limit = (x))
/* Compare two mm_segment_t values by their .seg member. */
35 #define segment_eq(a,b) ((a).seg == (b).seg)
39 * Is an address valid? This does a straightforward calculation rather
43 * - "addr" doesn't have any high-bits set
44 * - AND "size" doesn't have any high-bits set
45 * - AND "addr+size" doesn't have any high-bits set
46 * - OR we are in kernel mode.
/* If 'size' is a compile-time constant known non-negative, it cannot set
 * a high bit on its own, so it can be dropped (folds to 0); otherwise the
 * raw size participates in the check. */
48 #define __ua_size(size) \
49 (__builtin_constant_p(size) && (signed long) (size) > 0 ? 0 : (size))
/* Range is OK when none of addr, addr+size, size has a mask-selected bit
 * set, i.e. the masked OR is still >= 0 as a signed long. */
51 #define __access_ok(addr,size,mask) \
52 (((signed long)((mask)&(addr | (addr + size) | __ua_size(size)))) >= 0)
/* Current mask: 0 under KERNEL_DS (check bypassed), -1 under USER_DS. */
54 #define __access_mask ((long)(get_fs().seg))
/* Public check; 'type' is ignored here -- reads and writes check alike. */
56 #define access_ok(type,addr,size) \
57 __access_ok(((unsigned long)(addr)),(size),__access_mask)
/*
 * verify_area - legacy wrapper around access_ok().
 * Returns 0 if the [addr, addr+size) range may be accessed at the current
 * addr_limit, -EFAULT otherwise.  NOTE(review): the function's braces are
 * missing from this view (lines dropped by extraction); body kept as-is.
 */
59 static inline int verify_area(int type
, const void * addr
, unsigned long size
)
61 return access_ok(type
,addr
,size
) ? 0 : -EFAULT
;
65 * Uh, these should become the main single-value transfer routines ...
66 * They automatically use the right size if we just have the right
69 * As MIPS uses the same address space for kernel and user data, we
70 * can just do these as direct assignments.
73 * (a) re-use the arguments for side effects (sizeof is ok)
74 * (b) require any knowledge of processes at this stage
/* put_user(): store x through user pointer ptr with access_ok() checking;
 * evaluates to 0 or -EFAULT.  The cast coerces x to the pointee type
 * before it is handed to the size-dispatched store. */
76 #define put_user(x,ptr) \
77 __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
/*
 * get_user - fetch a simple variable from user space, with access checking.
 * Evaluates to 0 on success (x is assigned the fetched value) or -EFAULT.
 *
 * Fix: the previous expansion passed (__typeof__(*(ptr)))(x) into
 * __get_user_check, whose body assigns "x = (__typeof__(*(ptr))) __gu_val;"
 * -- so the assignment targeted a cast expression.  A cast is not an
 * lvalue (the old GCC "cast as lvalue" extension was removed), so pass x
 * through unmodified; the check macro already casts the result itself.
 */
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)))
82 * The "__xxx" versions do not do address space checking, useful when
83 * doing multiple accesses to the same area (the user has to do the
84 * checks by hand with "access_ok()")
/* Unchecked put_user variant: caller must have done access_ok() itself.
 * Evaluates to 0 or -EFAULT (fault can still occur and be fixed up). */
86 #define __put_user(x,ptr) \
87 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
/*
 * __get_user - unchecked get_user: the caller is responsible for having
 * validated the pointer with access_ok().  Evaluates to 0 or -EFAULT.
 *
 * Fix: same lvalue-cast defect as get_user -- __get_user_nocheck assigns
 * to its 'x' argument ("x = (__typeof__(*(ptr))) __gu_val;"), so wrapping
 * x in a cast at the call site produced an invalid assignment target.
 * Pass x through unmodified.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
/* Dummy oversized struct so __m(addr) can present a user address to inline
 * asm as a single "o" (offsettable memory) operand, letting ld/sd-style
 * insns use addr and addr+4 without the compiler narrowing the access. */
91 struct __large_struct
{ unsigned long buf
[100]; };
92 #define __m(x) (*(struct __large_struct *)(x))
95 * Yuck. We need two variants, one for 64bit operation and one
96 * for 32 bit mode and old iron.
/* 8-byte user load.  NOTE(review): these two definitions are alternatives
 * -- the #ifdef/#else (64-bit kernel using "ld" vs 32-bit using the lw/lw
 * pair) has been lost from this view of the file; confirm the guards. */
99 #define __GET_USER_DW __get_user_asm("ld")
101 #define __GET_USER_DW __get_user_asm_ll32
/*
 * __get_user_nocheck - unchecked user fetch dispatched on operand size.
 * Produces __gu_err (0 or -EFAULT) as the expression value and assigns the
 * fetched value, cast to the pointee type, to x.  The empty asm statements
 * mark __gu_val/__gu_err as initialized without emitting code.
 * NOTE(review): the declarations of __gu_err/__gu_addr and the
 * "switch (size) {" line are missing from this view; lines kept as-is.
 */
104 #define __get_user_nocheck(x,ptr,size) ({ \
106 __typeof(*(ptr)) __gu_val; \
108 __asm__("":"=r" (__gu_val)); \
109 __gu_addr = (long) (ptr); \
110 __asm__("":"=r" (__gu_err)); \
112 case 1: __get_user_asm("lb"); break; \
113 case 2: __get_user_asm("lh"); break; \
114 case 4: __get_user_asm("lw"); break; \
115 case 8: __GET_USER_DW; break; \
116 default: __get_user_unknown(); break; \
117 } x = (__typeof__(*(ptr))) __gu_val; __gu_err; })
/*
 * __get_user_check - as __get_user_nocheck, but the size-dispatched load
 * only runs after __access_ok() passes on the address.
 * NOTE(review): the __gu_err/__gu_addr declarations (presumably with
 * __gu_err preset to -EFAULT for the access_ok-failure path) and the
 * "switch (size) {" line are missing from this view; lines kept as-is.
 */
119 #define __get_user_check(x,ptr,size) ({ \
121 __typeof__(*(ptr)) __gu_val; \
123 __asm__("":"=r" (__gu_val)); \
124 __gu_addr = (long) (ptr); \
125 __asm__("":"=r" (__gu_err)); \
126 if (__access_ok(__gu_addr,size,__access_mask)) { \
128 case 1: __get_user_asm("lb"); break; \
129 case 2: __get_user_asm("lh"); break; \
130 case 4: __get_user_asm("lw"); break; \
131 case 8: __GET_USER_DW; break; \
132 default: __get_user_unknown(); break; \
133 } } x = (__typeof__(*(ptr))) __gu_val; __gu_err; })
/*
 * Single load with fault fixup: label 1 is the access; fixup label 3 in
 * .fixup loads -EFAULT into the error operand; the __ex_table entry pairs
 * the faulting address with the fixup.  Reads __gu_addr, writes __gu_err
 * and __gu_val from the enclosing statement expression.
 * NOTE(review): several continuation lines (zeroing %1 in the fixup, the
 * jump back to label 2, the .word table entry, .previous) are missing
 * from this view; lines kept as-is.
 */
135 #define __get_user_asm(insn) \
137 __asm__ __volatile__( \
138 "1:\t" insn "\t%1,%2\n\t" \
141 ".section\t.fixup,\"ax\"\n" \
142 "3:\tli\t%0,%3\n\t" \
146 ".section\t__ex_table,\"a\"\n\t" \
149 :"=r" (__gu_err), "=r" (__gu_val) \
150 :"o" (__m(__gu_addr)), "i" (-EFAULT)); })
153 * Get a long long 64 using 32 bit registers.
/*
 * 64-bit fetch on a 32-bit kernel: two lw's into a register pair (%D1 is
 * the second register of the pair, loaded from __gu_addr + 4), each with
 * an exception-table entry; "=&r" makes __gu_val an early clobber so it
 * cannot alias the address operands.
 * NOTE(review): the first lw (label 1), fixup body and table entries are
 * missing from this view; lines kept as-is.
 */
155 #define __get_user_asm_ll32 \
157 __asm__ __volatile__( \
159 "2:\tlw\t%D1,%3\n\t" \
161 "3:\t.section\t.fixup,\"ax\"\n" \
162 "4:\tli\t%0,%4\n\t" \
167 ".section\t__ex_table,\"a\"\n\t" \
171 :"=r" (__gu_err), "=&r" (__gu_val) \
172 :"o" (__m(__gu_addr)), "o" (__m(__gu_addr + 4)), \
/* Deliberately never defined: referencing it turns an unsupported access
 * size into a link-time error instead of silent misbehavior. */
175 extern void __get_user_unknown(void);
178 * Yuck. We need two variants, one for 64bit operation and one
179 * for 32 bit mode and old iron.
/* 8-byte user store.  NOTE(review): alternatives whose #ifdef/#else
 * (64-bit "sd" vs 32-bit sw/sw pair) was lost from this view; confirm. */
182 #define __PUT_USER_DW __put_user_asm("sd")
184 #define __PUT_USER_DW __put_user_asm_ll32
/*
 * __put_user_nocheck - unchecked user store dispatched on operand size.
 * Evaluates to __pu_err (0 or -EFAULT set by the asm fixup).
 * NOTE(review): the __pu_err declaration, the "__pu_val = (x);"
 * assignment, the "switch (size) {" line and the closing "} __pu_err; })"
 * are missing from this view; lines kept as-is.
 */
187 #define __put_user_nocheck(x,ptr,size) ({ \
189 __typeof__(*(ptr)) __pu_val; \
192 __pu_addr = (long) (ptr); \
193 __asm__("":"=r" (__pu_err)); \
195 case 1: __put_user_asm("sb"); break; \
196 case 2: __put_user_asm("sh"); break; \
197 case 4: __put_user_asm("sw"); break; \
198 case 8: __PUT_USER_DW; break; \
199 default: __put_user_unknown(); break; \
/*
 * __put_user_check - as __put_user_nocheck but the store only runs after
 * __access_ok() passes (presumably __pu_err is preset to -EFAULT for the
 * failing path -- its declaration is not visible here; confirm).
 * NOTE(review): the "__pu_val = (x);" line, "switch (size) {" and the
 * closing lines are missing from this view; lines kept as-is.
 */
202 #define __put_user_check(x,ptr,size) ({ \
204 __typeof__(*(ptr)) __pu_val; \
207 __pu_addr = (long) (ptr); \
208 __asm__("":"=r" (__pu_err)); \
209 if (__access_ok(__pu_addr,size,__access_mask)) { \
211 case 1: __put_user_asm("sb"); break; \
212 case 2: __put_user_asm("sh"); break; \
213 case 4: __put_user_asm("sw"); break; \
214 case 8: __PUT_USER_DW; break; \
215 default: __put_user_unknown(); break; \
/*
 * Single store with fault fixup.  "%z1" prints $0 when __pu_val is the
 * constant 0 (enabled by the "Jr" constraint: integer-zero or register);
 * fixup label 3 loads -EFAULT, __ex_table maps fault to fixup.
 * NOTE(review): the jump-back, .word table entry and .previous lines are
 * missing from this view; lines kept as-is.
 */
218 #define __put_user_asm(insn) \
220 __asm__ __volatile__( \
221 "1:\t" insn "\t%z1, %2\t\t\t# __put_user_asm\n\t" \
224 ".section\t.fixup,\"ax\"\n" \
225 "3:\tli\t%0,%3\n\t" \
228 ".section\t__ex_table,\"a\"\n\t" \
232 :"Jr" (__pu_val), "o" (__m(__pu_addr)), "i" (-EFAULT)); })
/*
 * 64-bit store on a 32-bit kernel: sw of the low register (%1) then sw of
 * its pair (%D1) at __pu_addr + 4, each covered by an exception-table
 * entry; fixup label 4 loads -EFAULT.
 * NOTE(review): the output-operand line, table entries and .previous are
 * missing from this view; lines kept as-is.
 */
234 #define __put_user_asm_ll32 \
236 __asm__ __volatile__( \
237 "1:\tsw\t%1, %2\t\t\t# __put_user_asm_ll32\n\t" \
238 "2:\tsw\t%D1, %3\n" \
241 ".section\t.fixup,\"ax\"\n" \
242 "4:\tli\t%0,%4\n\t" \
245 ".section\t__ex_table,\"a\"\n\t" \
250 :"r" (__pu_val), "o" (__m(__pu_addr)), "o" (__m(__pu_addr + 4)), \
/* Never defined: unsupported store sizes become link-time errors. */
253 extern void __put_user_unknown(void);
256 * We're generating jump to subroutines which will be outside the range of
/* Call a helper that may lie outside jal's reach when built as a module:
 * the module variant loads the address into $1 (AT) -- the jalr line is
 * missing from this view -- while the in-kernel variant uses a direct jal.
 * NOTE(review): the #ifdef/#else selecting between the two was lost. */
260 #define __MODULE_JAL(destination) \
262 "la\t$1, " #destination "\n\t" \
266 #define __MODULE_JAL(destination) \
267 "jal\t" #destination "\n\t"
/* Assembler copy routine with fault fixups; return value is presumably
 * the number of bytes NOT copied (callers assign it back into the length)
 * -- TODO(review): confirm against the asm implementation. */
270 extern size_t __copy_user(void *__to
, const void *__from
, size_t __n
);
/*
 * Marshal arguments into the MIPS o32 argument registers $4/$5/$6
 * (a0/a1/a2) and call __copy_user via __MODULE_JAL; the clobber list names
 * the caller-saved registers the asm routine may trash, including $31 (ra).
 * NOTE(review): the assignments to __cu_to_r/__cu_len_r, the result
 * extraction and the closing "})" are missing from this view.
 */
272 #define __invoke_copy_to_user(to,from,n) ({ \
273 register void *__cu_to_r __asm__ ("$4"); \
274 register const void *__cu_from_r __asm__ ("$5"); \
275 register long __cu_len_r __asm__ ("$6"); \
278 __cu_from_r = (from); \
280 __asm__ __volatile__( \
281 __MODULE_JAL(__copy_user) \
282 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
284 : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
/*
 * Unchecked copy to user space; evaluates to the residual length returned
 * by __copy_user.  NOTE(review): declarations of __cu_to/__cu_len, their
 * assignments and the closing lines are missing from this view.
 */
289 #define __copy_to_user(to,from,n) ({ \
291 const void *__cu_from; \
295 __cu_from = (from); \
297 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
/*
 * Checked copy to user space: only invokes __copy_user after access_ok()
 * passes on the destination; otherwise the (unmodified) length falls
 * through as "nothing copied".  NOTE(review): variable declarations and
 * the trailing continuation/closing lines are missing from this view.
 */
301 #define copy_to_user(to,from,n) ({ \
303 const void *__cu_from; \
307 __cu_from = (from); \
309 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) \
310 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
/*
 * As __invoke_copy_to_user, but with extra .set noreorder glue around the
 * call; the addu on $1 presumably sets up the end-of-buffer address used
 * by __copy_user's fixup path -- TODO(review): confirm against the asm
 * routine.  NOTE(review): assignments to __cu_to_r/__cu_len_r, one asm
 * line, the result extraction and closing lines are missing from this view.
 */
315 #define __invoke_copy_from_user(to,from,n) ({ \
316 register void *__cu_to_r __asm__ ("$4"); \
317 register const void *__cu_from_r __asm__ ("$5"); \
318 register long __cu_len_r __asm__ ("$6"); \
321 __cu_from_r = (from); \
323 __asm__ __volatile__( \
324 ".set\tnoreorder\n\t" \
325 __MODULE_JAL(__copy_user) \
327 "addu\t$1, %1, %2\n\t" \
329 ".set\treorder\n\t" \
330 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
332 : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
/*
 * Unchecked copy from user space; evaluates to the residual length.
 * NOTE(review): __cu_to/__cu_len declarations and the closing lines are
 * missing from this view.
 */
337 #define __copy_from_user(to,from,n) ({ \
339 const void *__cu_from; \
343 __cu_from = (from); \
345 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
/*
 * Checked copy from user space: validates the SOURCE pointer with
 * access_ok(VERIFY_READ) before invoking __copy_user.
 * NOTE(review): variable declarations and the trailing lines are missing
 * from this view.
 */
350 #define copy_from_user(to,from,n) ({ \
352 const void *__cu_from; \
356 __cu_from = (from); \
358 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) \
359 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
/*
 * __clear_user - zero 'size' bytes at user address 'addr' without access
 * checking, by calling the __bzero asm helper through __MODULE_JAL with
 * arguments in $4/$5 ($6 etc. listed as clobbers).  Presumably returns
 * the number of bytes not cleared -- TODO(review): confirm.
 * NOTE(review): the function braces, local result variable, asm template
 * lines and output operands are missing from this view; kept as-is.
 */
364 static inline __kernel_size_t
365 __clear_user(void *addr
, __kernel_size_t size
)
369 __asm__
__volatile__(
373 __MODULE_JAL(__bzero
)
376 : "r" (addr
), "r" (size
)
377 : "$4", "$5", "$6", "$8", "$9", "$31");
/*
 * clear_user - checked zeroing of user memory: skips the call entirely for
 * zero length or when access_ok(VERIFY_WRITE) fails, leaving __cl_size
 * (the residual count) as the result.  NOTE(review): the closing
 * "__cl_size; })" line is missing from this view.
 */
382 #define clear_user(addr,n) ({ \
383 void * __cl_addr = (addr); \
384 unsigned long __cl_size = (n); \
385 if (__cl_size && access_ok(VERIFY_WRITE, ((unsigned long)(__cl_addr)), __cl_size)) \
386 __cl_size = __clear_user(__cl_addr, __cl_size); \
390 * Returns: -EFAULT if exception before terminator, N if the entire
391 * buffer filled, else strlen.
/*
 * __strncpy_from_user - unchecked strncpy from user space via the
 * __strncpy_from_user_nocheck_asm helper; arguments passed per the o32
 * convention, "memory" clobber because the helper writes *__to.
 * Per the comment above: -EFAULT on fault before the terminator, __len if
 * the buffer filled, else the string length.
 * NOTE(review): the return-type line, braces, result variable and output
 * operands are missing from this view; kept as-is.
 */
394 __strncpy_from_user(char *__to
, const char *__from
, long __len
)
398 __asm__
__volatile__(
402 __MODULE_JAL(__strncpy_from_user_nocheck_asm
)
405 : "r" (__to
), "r" (__from
), "r" (__len
)
406 : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
/*
 * strncpy_from_user - checked variant: calls __strncpy_from_user_asm,
 * which presumably performs the access check itself (the "_nocheck"
 * helper above skips it) -- TODO(review): confirm in the asm source.
 * NOTE(review): the return-type line, braces, result variable and output
 * operands are missing from this view; kept as-is.
 */
412 strncpy_from_user(char *__to
, const char *__from
, long __len
)
416 __asm__
__volatile__(
420 __MODULE_JAL(__strncpy_from_user_asm
)
423 : "r" (__to
), "r" (__from
), "r" (__len
)
424 : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
429 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * __strlen_user - unchecked user strlen via __strlen_user_nocheck_asm;
 * per the comment above: 0 if bad, length + 1 otherwise.
 * NOTE(review): braces, the input-operand line passing 's' and the result
 * extraction are missing from this view; kept as-is.
 */
430 static inline long __strlen_user(const char *s
)
434 __asm__
__volatile__(
436 __MODULE_JAL(__strlen_user_nocheck_asm
)
440 : "$2", "$4", "$8", "$31");
/*
 * strlen_user - checked variant calling __strlen_user_asm (presumably
 * does its own access check -- TODO confirm).  NOTE(review): braces,
 * input operands and result extraction are missing from this view.
 */
445 static inline long strlen_user(const char *s
)
449 __asm__
__volatile__(
451 __MODULE_JAL(__strlen_user_asm
)
455 : "$2", "$4", "$8", "$31");
460 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * __strnlen_user - unchecked bounded user strlen via
 * __strnlen_user_nocheck_asm; per the comment above: 0 if bad, else
 * length + 1, capped by n.  NOTE(review): braces, input operands and
 * result extraction are missing from this view; kept as-is.
 */
461 static inline long __strnlen_user(const char *s
, long n
)
465 __asm__
__volatile__(
468 __MODULE_JAL(__strnlen_user_nocheck_asm
)
472 : "$2", "$4", "$5", "$8", "$31");
/*
 * strnlen_user - checked variant calling __strnlen_user_asm (presumably
 * does its own access check -- TODO confirm).  NOTE(review): braces,
 * input operands and result extraction are missing from this view.
 */
477 static inline long strnlen_user(const char *s
, long n
)
481 __asm__
__volatile__(
484 __MODULE_JAL(__strnlen_user_asm
)
488 : "$2", "$4", "$5", "$8", "$31");
/* One fault-fixup record: the __ex_table sections built by the macros
 * above are arrays of these; 'nextinsn' is where execution resumes after
 * a fault.  NOTE(review): the struct's braces and its first member (the
 * faulting-instruction address) are missing from this view. */
493 struct exception_table_entry
496 unsigned long nextinsn
;
499 #endif /* _ASM_UACCESS_H */