/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
10 #define _ASM_UACCESS_H
#include <linux/errno.h>
#include <linux/sched.h>
/*
 * Two-level stringification: __STR does the raw # expansion, STR expands
 * macro arguments first so STR(FOO) yields FOO's replacement as a string.
 * (__STR was dropped by the garbled listing; restored here.)
 */
#define __STR(x) #x
#define STR(x) __STR(x)
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
/*
 * Segment descriptors: KERNEL_DS (mask 0) disables the address check in
 * __access_ok(); USER_DS (mask -1) enables it.  mm_segment_t is declared
 * elsewhere in the kernel headers.
 */
#define KERNEL_DS	((mm_segment_t) { (unsigned long) 0L })
#define USER_DS		((mm_segment_t) { (unsigned long) -1L })
/* access_ok()/verify_area() direction flags (VERIFY_READ was dropped by
 * the garbled listing; restored — it is required by copy_from_user below). */
#define VERIFY_READ	0
#define VERIFY_WRITE	1
/* Current address-space limit lives in the thread struct. */
#define get_fs()	(current->thread.current_ds)
#define get_ds()	(KERNEL_DS)
#define set_fs(x)	(current->thread.current_ds = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
/*
 * Is a address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
/*
 * Non-zero iff (addr, addr+size) has no bits of `mask` set in its high
 * part.  With mask == 0 (KERNEL_DS) everything passes; with mask == -1
 * (USER_DS) any high-bit address or overflowing range fails.
 * Hygiene fix: addr/size parenthesized inside the OR expression.
 */
#define __access_ok(addr,size,mask) \
	(((__signed__ long)((mask) & ((addr) | (size) | ((addr) + (size))))) >= 0)

/* The check mask derived from the current segment (0 or -1). */
#define __access_mask ((long)(get_fs().seg))

#define access_ok(type,addr,size) \
	__access_ok(((unsigned long)(addr)), (size), __access_mask)
55 extern inline int verify_area(int type
, const void * addr
, unsigned long size
)
57 return access_ok(type
,addr
,size
) ? 0 : -EFAULT
;
/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As MIPS uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
/* Checked single-value transfer; evaluates to 0 or -EFAULT. */
#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x,ptr) \
	__get_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
/* Unchecked variants — caller is responsible for access_ok(). */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x,ptr) \
	__get_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
/*
 * Dummy type used so the "o" asm constraints below cover a wide memory
 * range around the target address; never instantiated.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
/* 8-byte fetch: single "ld" on 64-bit, two "lw"s otherwise.
 * NOTE(review): the conditional lines were dropped by the garbled listing;
 * restored as #ifdef __mips64 — verify against the original header. */
#ifdef __mips64
#define __GET_USER_DW __get_user_asm("ld")
#else
#define __GET_USER_DW __get_user_asm_ll32
#endif
/*
 * Unchecked user fetch.  Evaluates to 0 on success or -EFAULT on fault
 * (set by the asm fixup) and assigns the fetched value to x.  The empty
 * asms mark __gu_val/__gu_err as written so gcc does not warn about use
 * before initialization; the real values come from __get_user_asm.
 */
#define __get_user_nocheck(x,ptr,size) ({ \
	long __gu_err; \
	__typeof(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); \
	__gu_addr = (long) (ptr); \
	__asm__("":"=r" (__gu_err)); \
	switch (size) { \
	case 1: __get_user_asm("lb"); break; \
	case 2: __get_user_asm("lh"); break; \
	case 4: __get_user_asm("lw"); break; \
	case 8: __GET_USER_DW; break; \
	default: __get_user_unknown(); break; \
	} x = (__typeof__(*(ptr))) __gu_val; __gu_err; })
/*
 * Checked user fetch: validates the address with __access_ok() first.
 * Fix: the garbled original relied on an empty asm to "initialize"
 * __gu_err, leaving it undefined when the access check fails; it is now
 * explicitly preset to -EFAULT and overwritten by __get_user_asm on the
 * success path.
 */
#define __get_user_check(x,ptr,size) ({ \
	long __gu_err; \
	__typeof__(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); \
	__gu_addr = (long) (ptr); \
	__gu_err = -EFAULT; \
	if (__access_ok(__gu_addr,size,__access_mask)) { \
	switch (size) { \
	case 1: __get_user_asm("lb"); break; \
	case 2: __get_user_asm("lh"); break; \
	case 4: __get_user_asm("lw"); break; \
	case 8: __GET_USER_DW; break; \
	default: __get_user_unknown(); break; \
	} } x = (__typeof__(*(ptr))) __gu_val; __gu_err; })
/*
 * Single-instruction user load with exception-table fixup: on a fault,
 * the entry for label 1 redirects to label 3, which sets err = -EFAULT
 * and zeroes the value.  Success path sets err = 0 via "move %0,$0".
 * NOTE(review): dropped lines restored from the surviving fragments.
 */
#define __get_user_asm(insn) ({ \
	__asm__ __volatile__( \
	"1:\t" insn "\t%1,%2\n\t" \
	"move\t%0,$0\n" \
	"2:\n\t" \
	".section\t.fixup,\"ax\"\n" \
	"3:\tli\t%0,%3\n\t" \
	"move\t%1,$0\n\t" \
	"j\t2b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,3b\n\t" \
	".previous" \
	:"=r" (__gu_err), "=r" (__gu_val) \
	:"o" (__m(__gu_addr)), "i" (-EFAULT)); })
/*
 * Get a long long 64 using 32 bit registers.
 */
/*
 * 64-bit fetch as two 32-bit loads (%1 / %D1 are the low/high register
 * halves).  Either faulting load (labels 1 and 2) is redirected to the
 * fixup at label 4, which sets -EFAULT and zeroes both halves.
 * NOTE(review): dropped lines restored around the surviving fragments —
 * verify instruction order against the original header.
 */
#define __get_user_asm_ll32 ({ \
	__asm__ __volatile__( \
	"1:\tlw\t%1,%2\n\t" \
	"2:\tlw\t%D1,%3\n\t" \
	"move\t%0,$0\n" \
	"3:\t.section\t.fixup,\"ax\"\n" \
	"4:\tli\t%0,%4\n\t" \
	"move\t%1,$0\n\t" \
	"move\t%D1,$0\n\t" \
	"j\t3b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,4b\n\t" \
	".word\t2b,4b\n\t" \
	".previous" \
	:"=r" (__gu_err), "=&r" (__gu_val) \
	:"o" (__m(__gu_addr)), "o" (__m(__gu_addr + 4)), \
	 "i" (-EFAULT)); })
/* Deliberately undefined: referencing it for an unsupported size makes
 * the final link fail, turning a bad get_user() size into a build error. */
extern void __get_user_unknown(void);
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
/* 8-byte store: single "sd" on 64-bit, two "sw"s otherwise.
 * NOTE(review): conditional lines restored as #ifdef __mips64 — verify. */
#ifdef __mips64
#define __PUT_USER_DW __put_user_asm("sd")
#else
#define __PUT_USER_DW __put_user_asm_ll32
#endif
/*
 * Unchecked user store.  Evaluates to 0 on success or -EFAULT on fault.
 * The empty asm marks __pu_err as written so gcc does not warn; the real
 * value comes from __put_user_asm.
 */
#define __put_user_nocheck(x,ptr,size) ({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	__asm__("":"=r" (__pu_err)); \
	switch (size) { \
	case 1: __put_user_asm("sb"); break; \
	case 2: __put_user_asm("sh"); break; \
	case 4: __put_user_asm("sw"); break; \
	case 8: __PUT_USER_DW; break; \
	default: __put_user_unknown(); break; \
	} __pu_err; })
/*
 * Checked user store: validates the address with __access_ok() first.
 * Fix: as with __get_user_check, __pu_err is explicitly preset to
 * -EFAULT (the garbled original's empty asm left it undefined when the
 * access check failed); __put_user_asm overwrites it on success.
 */
#define __put_user_check(x,ptr,size) ({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	__pu_err = -EFAULT; \
	if (__access_ok(__pu_addr,size,__access_mask)) { \
	switch (size) { \
	case 1: __put_user_asm("sb"); break; \
	case 2: __put_user_asm("sh"); break; \
	case 4: __put_user_asm("sw"); break; \
	case 8: __PUT_USER_DW; break; \
	default: __put_user_unknown(); break; \
	} } __pu_err; })
/*
 * Single-instruction user store with exception-table fixup: a fault at
 * label 1 jumps to label 3, which sets err = -EFAULT; the success path
 * sets err = 0.
 * NOTE(review): dropped lines restored from the surviving fragments.
 */
#define __put_user_asm(insn) ({ \
	__asm__ __volatile__( \
	"1:\t" insn "\t%1,%2\n\t" \
	"move\t%0,$0\n" \
	"2:\n\t" \
	".section\t.fixup,\"ax\"\n" \
	"3:\tli\t%0,%3\n\t" \
	"j\t2b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,3b\n\t" \
	".previous" \
	:"=r" (__pu_err) \
	:"r" (__pu_val), "o" (__m(__pu_addr)), "i" (-EFAULT)); })
/*
 * 64-bit store as two 32-bit stores (%1 / %D1 are the low/high register
 * halves).  Either faulting store is redirected to the fixup at label 4.
 * NOTE(review): dropped lines restored around the surviving fragments —
 * verify against the original header.
 */
#define __put_user_asm_ll32 ({ \
	__asm__ __volatile__( \
	"1:\tsw\t%1,%2\n\t" \
	"2:\tsw\t%D1,%3\n" \
	"move\t%0,$0\n" \
	"3:\n\t" \
	".section\t.fixup,\"ax\"\n" \
	"4:\tli\t%0,%4\n\t" \
	"j\t3b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,4b\n\t" \
	".word\t2b,4b\n\t" \
	".previous" \
	:"=r" (__pu_err) \
	:"r" (__pu_val), "o" (__m(__pu_addr)), "o" (__m(__pu_addr + 4)), \
	 "i" (-EFAULT)); })
/* Deliberately undefined: link-time error for unsupported put_user() sizes. */
extern void __put_user_unknown(void);
/*
 * We're generating jump to subroutines which will be outside the range of
 * jump instructions for modules, so load the target address into $1 and
 * jalr through it; in-kernel code can use a plain jal.
 * NOTE(review): the #ifdef structure was dropped by the garbled listing;
 * restored as #ifdef MODULE — verify against the original header.
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
	".set\tnoat\n\t" \
	"la\t$1, " #destination "\n\t" \
	"jalr\t$1\n\t" \
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
	"jal\t" #destination "\n\t"
#endif
266 extern size_t __copy_user(void *__to
, const void *__from
, size_t __n
);
/*
 * Unchecked copy to user space; evaluates to the number of bytes NOT
 * copied.  Arguments are marshalled into $4-$6 per the __copy_user
 * calling convention; the residual count comes back in $6.
 * NOTE(review): dropped marshalling/result lines restored — verify.
 */
#define __copy_to_user(to,from,n) ({ \
	void *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__asm__ __volatile__( \
	"move\t$4, %1\n\t" \
	"move\t$5, %2\n\t" \
	"move\t$6, %3\n\t" \
	__MODULE_JAL(__copy_user) \
	"move\t%0, $6" \
	: "=r" (__cu_len) \
	: "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
	: "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", "$15", \
	  "$24", "$31","memory"); \
	__cu_len; \
})
/*
 * Unchecked copy from user space; evaluates to the number of bytes NOT
 * copied.  The addu in the noreorder/noat window computes the fault end
 * address in $1 for __copy_user's fixup path.
 * NOTE(review): dropped marshalling/result lines restored — verify.
 */
#define __copy_from_user(to,from,n) ({ \
	void *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__asm__ __volatile__( \
	"move\t$4, %1\n\t" \
	"move\t$5, %2\n\t" \
	"move\t$6, %3\n\t" \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user) \
	".set\tnoat\n\t" \
	"addu\t$1, %2, %3\n\t" \
	".set\tat\n\t" \
	".set\treorder\n\t" \
	"move\t%0, $6" \
	: "=r" (__cu_len) \
	: "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
	: "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", "$15", \
	  "$24", "$31","memory"); \
	__cu_len; \
})
/*
 * Checked copy to user space: runs the copy only when access_ok()
 * passes; otherwise evaluates to n (nothing copied).
 * NOTE(review): dropped marshalling/result lines restored — verify.
 */
#define copy_to_user(to,from,n) ({ \
	void *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) \
		__asm__ __volatile__( \
		"move\t$4, %1\n\t" \
		"move\t$5, %2\n\t" \
		"move\t$6, %3\n\t" \
		__MODULE_JAL(__copy_user) \
		"move\t%0, $6" \
		: "=r" (__cu_len) \
		: "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
		: "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", \
		  "$15", "$24", "$31","memory"); \
	__cu_len; \
})
/*
 * Checked copy from user space: runs the copy only when access_ok()
 * passes; otherwise evaluates to n (nothing copied).
 * NOTE(review): dropped marshalling/result lines restored — verify.
 */
#define copy_from_user(to,from,n) ({ \
	void *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) \
		__asm__ __volatile__( \
		"move\t$4, %1\n\t" \
		"move\t$5, %2\n\t" \
		"move\t$6, %3\n\t" \
		".set\tnoreorder\n\t" \
		__MODULE_JAL(__copy_user) \
		".set\tnoat\n\t" \
		"addu\t$1, %2, %3\n\t" \
		".set\tat\n\t" \
		".set\treorder\n\t" \
		"move\t%0, $6" \
		: "=r" (__cu_len) \
		: "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
		: "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", \
		  "$15", "$24", "$31","memory"); \
	__cu_len; \
})
364 extern inline __kernel_size_t
365 __clear_user(void *addr
, __kernel_size_t size
)
369 __asm__
__volatile__(
373 __MODULE_JAL(__bzero
)
376 : "r" (addr
), "r" (size
)
377 : "$4", "$5", "$6", "$8", "$9", "$31");
/*
 * Checked clear: zeroes n user bytes when the range passes access_ok();
 * evaluates to the number of bytes NOT cleared (n on a failed check).
 */
#define clear_user(addr,n) ({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && access_ok(VERIFY_WRITE, ((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })
/*
 * Returns: -EFAULT if exception before terminator, N if the entire
 * buffer filled, else strlen.
 */
394 __strncpy_from_user(char *__to
, const char *__from
, long __len
)
398 __asm__
__volatile__(
402 __MODULE_JAL(__strncpy_from_user_nocheck_asm
)
405 : "r" (__to
), "r" (__from
), "r" (__len
)
406 : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
412 strncpy_from_user(char *__to
, const char *__from
, long __len
)
416 __asm__
__volatile__(
420 __MODULE_JAL(__strncpy_from_user_asm
)
423 : "r" (__to
), "r" (__from
), "r" (__len
)
424 : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
429 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
430 extern inline long __strlen_user(const char *s
)
434 __asm__
__volatile__(
436 __MODULE_JAL(__strlen_user_nocheck_asm
)
440 : "$2", "$4", "$8", "$31");
445 extern inline long strlen_user(const char *s
)
449 __asm__
__volatile__(
451 __MODULE_JAL(__strlen_user_asm
)
455 : "$2", "$4", "$8", "$31");
460 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
461 extern inline long __strnlen_user(const char *s
, long n
)
465 __asm__
__volatile__(
468 __MODULE_JAL(__strnlen_user_nocheck_asm
)
472 : "$2", "$4", "$5", "$8", "$31");
477 extern inline long strnlen_user(const char *s
, long n
)
481 __asm__
__volatile__(
484 __MODULE_JAL(__strnlen_user_asm
)
488 : "$2", "$4", "$5", "$8", "$31");
/*
 * One __ex_table record: the address of the potentially faulting
 * instruction and the address to continue at after the fixup (matches
 * the ".word 1b,3b" pairs emitted by the asm macros above).
 * NOTE(review): the `insn` member line was dropped by the garbled
 * listing; restored — verify the field name against the original.
 */
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};
/* Returns 0 if exception not found and fixup.unit otherwise.  */
extern unsigned long search_exception_table(unsigned long addr);
502 /* Returns the new pc */
503 #define fixup_exception(map_reg, fixup_unit, pc) \
508 #endif /* _ASM_UACCESS_H */