#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(0xC0000000)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
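
/*
 * Illustrative sketch, not part of this header: the usual pattern for
 * temporarily widening the address limit so that code which normally
 * expects a user pointer can be handed a kernel buffer.  The names
 * old_fs, kernel_buf and some_call are hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = some_call(kernel_buf, len);	(routine that expects a user pointer)
 *	set_fs(old_fs);
 */
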
extern int __verify_write(const void *, unsigned long);

#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))

/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 */
#define __range_ok(addr,size) ({ \
	unsigned long flag,sum; \
	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
		:"=&r" (flag), "=r" (sum) \
		:"1" (addr),"g" (size),"g" (current->addr_limit.seg)); \
	flag; })

#if CPU > 386

#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)

#else

#define access_ok(type,addr,size) ( (__range_ok(addr,size) == 0) && \
	((type) == VERIFY_READ || boot_cpu_data.wp_works_ok || \
	 segment_eq(get_fs(),KERNEL_DS) || \
	 __verify_write((void *)(addr),(size))))

#endif /* CPU */

extern inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
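
/*
 * Illustrative sketch, not part of this header: how a system call or
 * driver method would normally validate a user buffer before touching
 * it.  The function and argument names are hypothetical.
 *
 *	int example(char *ubuf, unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_WRITE, ubuf, len))
 *			return -EFAULT;
 *		... safe to use the unchecked __xxx routines on ubuf ...
 *	}
 *
 * verify_area() wraps the same check in the older interface: it returns
 * 0 when the range is acceptable and -EFAULT otherwise.
 */
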
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);
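
/*
 * Illustrative sketch, not part of this header: roughly how the page
 * fault handler consumes this table.  When a fault happens in kernel
 * mode, the faulting instruction address is looked up; if a fixup
 * address is found, execution simply resumes there instead of oopsing
 * (the real code lives in the arch fault handler).
 *
 *	unsigned long fixup = search_exception_table(regs->eip);
 *	if (fixup) {
 *		regs->eip = fixup;
 *		return;
 *	}
 */
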
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);

#define __get_user_x(size,ret,x,ptr) \
	__asm__ __volatile__("call __get_user_" #size \
		:"=a" (ret),"=d" (x) \
		:"0" (ptr))

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr) \
({	int __ret_gu,__val_gu; \
	switch(sizeof (*(ptr))) { \
	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
	default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \
	} \
	(x) = (__typeof__(*(ptr)))__val_gu; \
	__ret_gu; \
})

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);

extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr) \
	__asm__ __volatile__("call __put_user_" #size \
		:"=a" (ret) \
		:"0" (ptr),"d" (x) \
		:"cx")

#define put_user(x,ptr) \
({	int __ret_pu; \
	switch(sizeof (*(ptr))) { \
	case 1:  __put_user_x(1,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
	case 2:  __put_user_x(2,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
	case 4:  __put_user_x(4,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
	default: __put_user_x(X,__ret_pu,x,ptr); break; \
	} \
	__ret_pu; \
})
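
/*
 * Illustrative sketch, not part of this header: typical single-value
 * transfers in a hypothetical ioctl handler.  Both macros return 0 on
 * success and -EFAULT if the user address is bad; get_user() also
 * stores the fetched value into its first argument.
 *
 *	int val;
 *
 *	if (get_user(val, (int *)arg))
 *		return -EFAULT;
 *	...
 *	if (put_user(result, (int *)arg))
 *		return -EFAULT;
 */
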
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
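
/*
 * Illustrative sketch, not part of this header: the unchecked variants
 * are meant for repeated accesses to a range that has already passed
 * access_ok(), as described above.  The names uarray, count, tmp and
 * sum are hypothetical.
 *
 *	if (!access_ok(VERIFY_READ, uarray, count * sizeof(int)))
 *		return -EFAULT;
 *	for (i = 0; i < count; i++) {
 *		if (__get_user(tmp, uarray + i))
 *			return -EFAULT;
 *		sum += tmp;
 *	}
 */
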
#define __put_user_nocheck(x,ptr,size) \
({ \
	long __pu_err; \
	__put_user_size((x),(ptr),(size),__pu_err); \
	__pu_err; \
})

#define __put_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break; \
	case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break; \
	case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break; \
	default: __put_user_bad(); \
	} \
} while (0)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype) \
	__asm__ __volatile__( \
		"1:	mov"itype" %"rtype"1,%2\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	movl %3,%0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 4\n" \
		"	.long 1b,3b\n" \
		".previous" \
		: "=r"(err) \
		: ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))

#define __get_user_nocheck(x,ptr,size) \
({ \
	long __gu_err, __gu_val; \
	__get_user_size(__gu_val,(ptr),(size),__gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break; \
	case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break; \
	case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break; \
	default: (x) = __get_user_bad(); \
	} \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype) \
	__asm__ __volatile__( \
		"1:	mov"itype" %2,%"rtype"1\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	movl %3,%0\n" \
		"	xor"itype" %"rtype"1,%"rtype"1\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 4\n" \
		"	.long 1b,3b\n" \
		".previous" \
		: "=r"(err), ltype (x) \
		: "m"(__m(addr)), "i"(-EFAULT), "0"(err))

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  These macros can be optimized for
 * the case of just returning from the function in which xxx_ret is used.
 */

#define put_user_ret(x,ptr,ret) ({ if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ if (get_user(x,ptr)) return ret; })

#define __put_user_ret(x,ptr,ret) ({ if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ if (__get_user(x,ptr)) return ret; })
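
/*
 * Illustrative sketch, not part of this header: the _ret forms bail out
 * of the *calling* function on a fault, so error plumbing disappears.
 * The handler and argument names are hypothetical.
 *
 *	static int example_ioctl_handler(unsigned long arg)
 *	{
 *		int val;
 *
 *		get_user_ret(val, (int *)arg, -EFAULT);
 *		...
 *		put_user_ret(val * 2, (int *)arg, -EFAULT);
 *		return 0;
 *	}
 */
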
/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy.  */
#define __copy_user(to,from,size) \
	__asm__ __volatile__( \
		"0:	rep; movsl\n" \
		"	movl %1,%0\n" \
		"1:	rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	lea 0(%1,%0,4),%0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 4\n" \
		"	.long 0b,3b\n" \
		"	.long 1b,2b\n" \
		".previous" \
		: "=&c"(size) \
		: "r"(size & 3), "0"(size / 4), "D"(to), "S"(from) \
		: "di", "si", "memory")

/* We let the __ versions of copy_from/to_user be inlined, because they're
 * often used in fast paths and have only a small space overhead.
 */
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	__copy_user(to,from,n);
	return n;
}

static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	__copy_user(to,from,n);
	return n;
}

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \
	switch (size & 3) { \
	default: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:\n" \
			".section .fixup,\"ax\"\n" \
			"2:	shl $2,%0\n" \
			"	jmp 1b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,2b\n" \
			".previous" \
			: "=c"(size) \
			: "S"(from), "D"(to), "0"(size/4) \
			: "di", "si", "memory"); \
		break; \
	case 1: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:	movsb\n" \
			"2:\n" \
			".section .fixup,\"ax\"\n" \
			"3:	shl $2,%0\n" \
			"4:	incl %0\n" \
			"	jmp 2b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,3b\n" \
			"	.long 1b,4b\n" \
			".previous" \
			: "=c"(size) \
			: "S"(from), "D"(to), "0"(size/4) \
			: "di", "si", "memory"); \
		break; \
	case 2: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:	movsw\n" \
			"2:\n" \
			".section .fixup,\"ax\"\n" \
			"3:	shl $2,%0\n" \
			"4:	addl $2,%0\n" \
			"	jmp 2b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,3b\n" \
			"	.long 1b,4b\n" \
			".previous" \
			: "=c"(size) \
			: "S"(from), "D"(to), "0"(size/4) \
			: "di", "si", "memory"); \
		break; \
	case 3: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:	movsw\n" \
			"2:	movsb\n" \
			"3:\n" \
			".section .fixup,\"ax\"\n" \
			"4:	shl $2,%0\n" \
			"5:	addl $2,%0\n" \
			"6:	incl %0\n" \
			"	jmp 3b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,4b\n" \
			"	.long 1b,5b\n" \
			"	.long 2b,6b\n" \
			".previous" \
			: "=c"(size) \
			: "S"(from), "D"(to), "0"(size/4) \
			: "di", "si", "memory"); \
		break; \
	} \
} while (0)

unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void *, unsigned long);

static inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to,from,n);
	return n;
}

static inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user(to,from,n);
	return n;
}

static inline unsigned long
__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	__constant_copy_user(to,from,n);
	return n;
}

static inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	__constant_copy_user(to,from,n);
	return n;
}

#define copy_to_user(to,from,n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_to_user((to),(from),(n)) : \
	 __generic_copy_to_user((to),(from),(n)))

#define copy_from_user(to,from,n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_from_user((to),(from),(n)) : \
	 __generic_copy_from_user((to),(from),(n)))

#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })

#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })

#define __copy_to_user(to,from,n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_to_user_nocheck((to),(from),(n)) : \
	 __generic_copy_to_user_nocheck((to),(from),(n)))

#define __copy_from_user(to,from,n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_from_user_nocheck((to),(from),(n)) : \
	 __generic_copy_from_user_nocheck((to),(from),(n)))
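
/*
 * Illustrative sketch, not part of this header: the bulk copy routines
 * return the number of bytes that could NOT be copied, so 0 means
 * complete success.  A hypothetical driver write path might do:
 *
 *	char kbuf[64];
 *
 *	if (count > sizeof(kbuf))
 *		count = sizeof(kbuf);
 *	if (copy_from_user(kbuf, ubuf, count))
 *		return -EFAULT;
 */
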
long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
long strlen_user(const char *str);
unsigned long clear_user(void *mem, unsigned long len);
unsigned long __clear_user(void *mem, unsigned long len);
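
/*
 * Illustrative sketch, not part of this header: strncpy_from_user()
 * returns the length of the copied string, or -EFAULT on a bad address,
 * and clear_user() returns the number of bytes it could not zero.  The
 * buffer names below are hypothetical.
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *	if (len < 0)
 *		return -EFAULT;
 *	name[len] = '\0';
 */
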
#endif /* __i386_UACCESS_H */