#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <asm/page.h>
extern void __check_locks(unsigned int);
/* No-op stub for builds where lock checking is compiled out. */
#define __check_locks(x) do { } while (0)
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
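/*
 * Typical get_fs()/set_fs() pattern (a sketch only; "old_fs" is a
 * hypothetical local): widen the limit to KERNEL_DS so code that checks
 * against addr_limit can be fed a kernel pointer, then always restore
 * the old value.
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	...perform the user-style access on a kernel address...
 *	set_fs(old_fs);
 */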
extern int __verify_write(const void *, unsigned long);

#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))
/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 */
#define __range_ok(addr,size) ({ \
	unsigned long flag,sum; \
	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
		:"=&r" (flag), "=r" (sum) \
		:"1" (addr),"g" (size),"g" (current->addr_limit.seg)); \
	flag; })
#ifdef CONFIG_X86_WP_WORKS_OK

#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)

#else

#define access_ok(type,addr,size) ( (__range_ok(addr,size) == 0) && \
			 ((type) == VERIFY_READ || boot_cpu_data.wp_works_ok || \
			 segment_eq(get_fs(),KERNEL_DS) || \
			  __verify_write((void *)(addr),(size))))

#endif
extern inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);
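/*
 * Sketch of how the fault handler consumes this table (hypothetical; the
 * real lookup lives in the page-fault code, not in this header):
 *
 *	unsigned long fixup = search_exception_table(regs->eip);
 *	if (fixup)
 *		regs->eip = fixup;	resume at the out-of-line fixup
 */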
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
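/*
 * Hypothetical caller (e.g. an ioctl handler, with "arg" a user pointer);
 * a sketch only, not part of this header:
 *
 *	int val;
 *	if (get_user(val, (int *)arg))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, (int *)arg))
 *		return -EFAULT;
 */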
extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);

#define __get_user_x(size,ret,x,ptr) \
	__asm__ __volatile__("call __get_user_" #size \
		:"=a" (ret),"=d" (x) \
		:"0" (ptr))
/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr)							\
({	int __ret_gu,__val_gu;						\
	switch(sizeof (*(ptr))) {					\
	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;		\
	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;		\
	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;		\
	default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;		\
	}								\
	(x) = (__typeof__(*(ptr)))__val_gu;				\
	__ret_gu;							\
})
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);

extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr)					\
	__asm__ __volatile__("call __put_user_" #size			\
		:"=a" (ret)						\
		:"0" (ptr),"d" (x)					\
		:"cx")
#define put_user(x,ptr)							\
({	int __ret_pu;							\
	switch(sizeof (*(ptr))) {					\
	case 1:  __put_user_x(1,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
	case 2:  __put_user_x(2,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
	case 4:  __put_user_x(4,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
	default: __put_user_x(X,__ret_pu,x,ptr); break;			\
	}								\
	__ret_pu;							\
})
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
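/*
 * The "__" forms skip access_ok(); the caller must have validated the
 * range first.  Hypothetical pattern for several accesses to one user
 * area (uptr, a and b are the caller's):
 *
 *	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
 *		return -EFAULT;
 *	__get_user(a, uptr);
 *	__get_user(b, uptr + 1);
 */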
#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})
#define __put_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	switch (size) {							\
	  case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break;	\
	  case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break;	\
	  case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break;	\
	  default: __put_user_bad();					\
	}								\
} while (0)
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype)	\
	__asm__ __volatile__(					\
		"1:	mov"itype" %"rtype"1,%2\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	movl %3,%0\n"				\
		"	jmp 2b\n"				\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align 4\n	.long 1b,3b\n"		\
		".previous"					\
		: "=r"(err)					\
		: ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))
#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	switch (size) {							\
	  case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break;	\
	  case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break;	\
	  case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break;	\
	  default: (x) = __get_user_bad();				\
	}								\
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype)	\
	__asm__ __volatile__(					\
		"1:	mov"itype" %2,%"rtype"1\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	movl %3,%0\n"				\
		"	xor"itype" %"rtype"1,%"rtype"1\n"	\
		"	jmp 2b\n"				\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align 4\n	.long 1b,3b\n"		\
		".previous"					\
		: "=r"(err), ltype (x)				\
		: "m"(__m(addr)), "i"(-EFAULT), "0"(err))
/*
 * The "xxx_ret" versions return the constant specified in their third
 * argument if something bad happens. These macros can be optimized for
 * the case of just returning from the function in which xxx_ret is used.
 */

#define put_user_ret(x,ptr,ret) ({ if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ if (get_user(x,ptr)) return ret; })

#define __put_user_ret(x,ptr,ret) ({ if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ if (__get_user(x,ptr)) return ret; })
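/*
 * Hypothetical use inside a syscall body; equivalent to
 * "if (get_user(val, uptr)) return -EFAULT;":
 *
 *	get_user_ret(val, uptr, -EFAULT);
 */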
/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy.  */
#define __copy_user(to,from,size)					\
	__asm__ __volatile__(						\
		"0:	rep; movsl\n"					\
		"	movl %1,%0\n"					\
		"1:	rep; movsb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	lea 0(%1,%0,4),%0\n"				\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 4\n	.long 0b,3b\n	.long 1b,2b\n"	\
		".previous"						\
		: "=&c"(size)						\
		: "r"(size & 3), "0"(size / 4), "D"(to), "S"(from)	\
		: "di", "si", "memory")
/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	__copy_user(to,from,n);
	return n;
}
static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	__copy_user(to,from,n);
	return n;
}
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)			\
do {								\
	switch (size & 3) {					\
	default:						\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:\n"					\
			".section .fixup,\"ax\"\n"		\
			"2:	shl $2,%0\n	jmp 1b\n"	\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n	.long 0b,2b\n"	\
			".previous"				\
			: "=c"(size)				\
			: "S"(from), "D"(to), "0"(size/4)	\
			: "di", "si", "memory");		\
		break;						\
	case 1:							\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:	movsb\n"			\
			"2:\n"					\
			".section .fixup,\"ax\"\n"		\
			"3:	shl $2,%0\n4:	incl %0\n	jmp 2b\n" \
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n	.long 0b,3b\n	.long 1b,4b\n" \
			".previous"				\
			: "=c"(size)				\
			: "S"(from), "D"(to), "0"(size/4)	\
			: "di", "si", "memory");		\
		break;						\
	case 2:							\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:	movsw\n"			\
			"2:\n"					\
			".section .fixup,\"ax\"\n"		\
			"3:	shl $2,%0\n4:	addl $2,%0\n	jmp 2b\n" \
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n	.long 0b,3b\n	.long 1b,4b\n" \
			".previous"				\
			: "=c"(size)				\
			: "S"(from), "D"(to), "0"(size/4)	\
			: "di", "si", "memory");		\
		break;						\
	case 3:							\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:	movsw\n"			\
			"2:	movsb\n"			\
			"3:\n"					\
			".section .fixup,\"ax\"\n"		\
			"4:	shl $2,%0\n5:	addl $2,%0\n6:	incl %0\n	jmp 3b\n" \
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n	.long 0b,4b\n	.long 1b,5b\n	.long 2b,6b\n" \
			".previous"				\
			: "=c"(size)				\
			: "S"(from), "D"(to), "0"(size/4)	\
			: "di", "si", "memory");		\
		break;						\
	}							\
} while (0)
unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void *, unsigned long);
static inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to,from,n);
	return n;
}
static inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user(to,from,n);
	return n;
}
static inline unsigned long
__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	__constant_copy_user(to,from,n);
	return n;
}
static inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	__constant_copy_user(to,from,n);
	return n;
}
#define copy_to_user(to,from,n)				\
	(__builtin_constant_p(n) ?			\
	 __constant_copy_to_user((to),(from),(n)) :	\
	 __generic_copy_to_user((to),(from),(n)))

#define copy_from_user(to,from,n)			\
	(__builtin_constant_p(n) ?			\
	 __constant_copy_from_user((to),(from),(n)) :	\
	 __generic_copy_from_user((to),(from),(n)))
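/*
 * Hypothetical use ("ubuf" is a user pointer); a sketch only.  A constant
 * sizeof() lets __builtin_constant_p(n) pick the unrolled
 * __constant_copy_* path at compile time:
 *
 *	struct foo kbuf;
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */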
#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })

#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })
#define __copy_to_user(to,from,n)				\
	(__builtin_constant_p(n) ?				\
	 __constant_copy_to_user_nocheck((to),(from),(n)) :	\
	 __generic_copy_to_user_nocheck((to),(from),(n)))

#define __copy_from_user(to,from,n)				\
	(__builtin_constant_p(n) ?				\
	 __constant_copy_from_user_nocheck((to),(from),(n)) :	\
	 __generic_copy_from_user_nocheck((to),(from),(n)))
long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
long strlen_user(const char *str);
unsigned long clear_user(void *mem, unsigned long len);
unsigned long __clear_user(void *mem, unsigned long len);
#endif /* __i386_UACCESS_H */