/* include/asm-i386/uaccess.h */
#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, while
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit = (x))
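/*
 * Usage sketch (hedged): kernel code widens the limit with set_fs(KERNEL_DS)
 * so that the user-access helpers below accept kernel addresses, then
 * restores the old value.  kernel_read_sketch() and its file argument are
 * hypothetical names; only get_fs()/set_fs()/KERNEL_DS are defined here.
 */
#if 0
static long kernel_read_sketch(struct file *file, char *buf, unsigned long count)
{
	mm_segment_t old_fs = get_fs();		/* remember the caller's limit */
	long ret;

	set_fs(KERNEL_DS);			/* checks now pass for kernel pointers */
	ret = file->f_op->read(file, buf, count, &file->f_pos);
	set_fs(old_fs);				/* always restore before returning */
	return ret;
}
#endif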
#define segment_eq(a,b)	((a).seg == (b).seg)
extern int __verify_write(const void *, unsigned long);

#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))

/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 */
#define __range_ok(addr,size) ({ \
	unsigned long flag,sum; \
	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
		:"=&r" (flag), "=r" (sum) \
		:"1" (addr),"g" ((int)(size)),"g" (current->addr_limit.seg)); \
	flag; })
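/*
 * What the asm above computes, written as a hedged C sketch that uses
 * 64-bit arithmetic for the 33rd bit (the carry out of addr + size):
 * the result is 0 exactly when [addr, addr+size) neither wraps past
 * 2^32 nor reaches beyond current->addr_limit.seg.  __range_ok_sketch()
 * is a hypothetical name used only for illustration.
 */
#if 0
static inline int __range_ok_sketch(const void *addr, unsigned long size)
{
	unsigned long long end = (unsigned long)addr + (unsigned long long)size;

	if (end > 0xffffffffULL)		/* the addl carried: range wraps */
		return -1;
	if (end > current->addr_limit.seg)	/* the cmpl borrowed: above the limit */
		return -1;
	return 0;				/* whole range is acceptable */
}
#endif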
#ifdef CONFIG_X86_WP_WORKS_OK

#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)

#else

#define access_ok(type,addr,size) ( (__range_ok(addr,size) == 0) && \
	 ((type) == VERIFY_READ || boot_cpu_data.wp_works_ok || \
	 segment_eq(get_fs(),KERNEL_DS) || \
	  __verify_write((void *)(addr),(size))))

#endif
extern inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
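/*
 * Usage sketch (hedged): a driver checks a user pointer once and then uses
 * the unchecked __-prefixed accessors defined further down.  The function
 * name and argument layout are hypothetical.
 */
#if 0
static int sample_ioctl_sketch(unsigned long arg)
{
	int value;
	int err = verify_area(VERIFY_READ, (void *)arg, sizeof(int));

	if (err)
		return err;			/* -EFAULT on a bad range */
	__get_user(value, (int *)arg);		/* no further checking needed */
	return value ? 0 : -EINVAL;
}
#endif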
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};
/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);
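/*
 * Hedged sketch of how the page fault handler consumes the table
 * (modelled on the i386 fault handler of this era): if the faulting
 * kernel instruction has a registered fixup, execution resumes there
 * instead of oopsing.  kernel_fault_fixup_sketch() is a hypothetical name.
 */
#if 0
static void kernel_fault_fixup_sketch(struct pt_regs *regs)
{
	/* regs->eip is the address of the faulting kernel instruction */
	unsigned long fixup = search_exception_table(regs->eip);

	if (fixup)
		regs->eip = fixup;	/* resume at the out-of-line fixup code */
	/* else: no fixup registered, this is a genuine kernel bug */
}
#endif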
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);

#define __get_user_x(size,ret,x,ptr) \
	__asm__ __volatile__("call __get_user_" #size \
		:"=a" (ret),"=d" (x) \
		:"0" (ptr))
/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr) \
({	int __ret_gu,__val_gu; \
	switch(sizeof (*(ptr))) { \
	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
	default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \
	} \
	(x) = (__typeof__(*(ptr)))__val_gu; \
	__ret_gu; \
})
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);

extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr) \
	__asm__ __volatile__("call __put_user_" #size \
		:"=a" (ret) \
		:"0" (ptr),"d" (x) \
		:"cx")
#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
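/*
 * Usage sketch (hedged): the checked forms return 0 on success or -EFAULT,
 * so callers can use them directly; the __ forms rely on an earlier
 * access_ok().  do_sample_call_sketch() and its pointer are hypothetical.
 */
#if 0
static int do_sample_call_sketch(int *uptr)
{
	int val;

	if (get_user(val, uptr))	/* validates the address and fetches */
		return -EFAULT;
	val++;
	if (put_user(val, uptr))	/* validates the address and stores */
		return -EFAULT;
	return 0;
}
#endif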
#define __put_user_nocheck(x,ptr,size) \
({ \
	long __pu_err; \
	__put_user_size((x),(ptr),(size),__pu_err); \
	__pu_err; \
})
#define __put_user_check(x,ptr,size) \
({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) *__pu_addr = (ptr); \
	if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
		__put_user_size((x),__pu_addr,(size),__pu_err); \
	__pu_err; \
})
#define __put_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	  case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break; \
	  case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break; \
	  case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break; \
	  default: __put_user_bad(); \
	} \
} while (0)
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype) \
	__asm__ __volatile__( \
		"1:	mov"itype" %"rtype"1,%2\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	movl %3,%0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 4\n" \
		"	.long 1b,3b\n" \
		".previous" \
		: "=r"(err) \
		: ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))
#define __get_user_nocheck(x,ptr,size) \
({ \
	long __gu_err, __gu_val; \
	__get_user_size(__gu_val,(ptr),(size),__gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})
extern long __get_user_bad(void);
#define __get_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	  case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break; \
	  case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break; \
	  case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break; \
	  default: (x) = __get_user_bad(); \
	} \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype) \
	__asm__ __volatile__( \
		"1:	mov"itype" %2,%"rtype"1\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	movl %3,%0\n" \
		"	xor"itype" %"rtype"1,%"rtype"1\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 4\n" \
		"	.long 1b,3b\n" \
		".previous" \
		: "=r"(err), ltype (x) \
		: "m"(__m(addr)), "i"(-EFAULT), "0"(err))
/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy.  */
#define __copy_user(to,from,size) \
do { \
	int __d0, __d1; \
	__asm__ __volatile__( \
		"0:	rep; movsl\n" \
		"	movl %3,%0\n" \
		"1:	rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	lea 0(%3,%0,4),%0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 4\n" \
		"	.long 0b,3b\n" \
		"	.long 1b,2b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
		: "memory"); \
} while (0)
#define __copy_user_zeroing(to,from,size) \
do { \
	int __d0, __d1; \
	__asm__ __volatile__( \
		"0:	rep; movsl\n" \
		"	movl %3,%0\n" \
		"1:	rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	lea 0(%3,%0,4),%0\n" \
		"4:	pushl %0\n" \
		"	pushl %%eax\n" \
		"	xorl %%eax,%%eax\n" \
		"	rep; stosb\n" \
		"	popl %%eax\n" \
		"	popl %0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 4\n" \
		"	.long 0b,3b\n" \
		"	.long 1b,4b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
		: "memory"); \
} while (0)
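/*
 * Hedged C-level sketch of the fault fixup above: on a fault the count
 * register is rebuilt as the number of bytes NOT copied (remaining
 * longwords times four plus the byte tail), and the zeroing variant also
 * clears that remainder of the destination so a short copy_from_user
 * never exposes stale kernel memory.  remaining_bytes_sketch() is a
 * hypothetical helper used only for illustration.
 */
#if 0
static inline unsigned long
remaining_bytes_sketch(unsigned long longs_left, unsigned long byte_tail)
{
	/* mirrors "lea 0(%3,%0,4),%0": %0 = longs_left, %3 = byte_tail */
	return byte_tail + longs_left * 4;
}
#endif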
/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	__copy_user_zeroing(to,from,n);
	return n;
}
static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	__copy_user(to,from,n);
	return n;
}
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \
	int __d0, __d1; \
	switch (size & 3) { \
	default: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:\n" \
			".section .fixup,\"ax\"\n" \
			"2:	shl $2,%0\n" \
			"	jmp 1b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,2b\n" \
			".previous" \
			: "=c"(size), "=&S" (__d0), "=&D" (__d1) \
			: "1"(from), "2"(to), "0"(size/4) \
			: "memory"); \
		break; \
	case 1: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:	movsb\n" \
			"2:\n" \
			".section .fixup,\"ax\"\n" \
			"3:	shl $2,%0\n" \
			"4:	incl %0\n" \
			"	jmp 2b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,3b\n" \
			"	.long 1b,4b\n" \
			".previous" \
			: "=c"(size), "=&S" (__d0), "=&D" (__d1) \
			: "1"(from), "2"(to), "0"(size/4) \
			: "memory"); \
		break; \
	case 2: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:	movsw\n" \
			"2:\n" \
			".section .fixup,\"ax\"\n" \
			"3:	shl $2,%0\n" \
			"4:	addl $2,%0\n" \
			"	jmp 2b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,3b\n" \
			"	.long 1b,4b\n" \
			".previous" \
			: "=c"(size), "=&S" (__d0), "=&D" (__d1) \
			: "1"(from), "2"(to), "0"(size/4) \
			: "memory"); \
		break; \
	case 3: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:	movsw\n" \
			"2:	movsb\n" \
			"3:\n" \
			".section .fixup,\"ax\"\n" \
			"4:	shl $2,%0\n" \
			"5:	addl $2,%0\n" \
			"6:	incl %0\n" \
			"	jmp 3b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,4b\n" \
			"	.long 1b,5b\n" \
			"	.long 2b,6b\n" \
			".previous" \
			: "=c"(size), "=&S" (__d0), "=&D" (__d1) \
			: "1"(from), "2"(to), "0"(size/4) \
			: "memory"); \
		break; \
	} \
} while (0)
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size) \
do { \
	int __d0, __d1; \
	switch (size & 3) { \
	default: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:\n" \
			".section .fixup,\"ax\"\n" \
			"2:	pushl %0\n" \
			"	pushl %%eax\n" \
			"	xorl %%eax,%%eax\n" \
			"	rep; stosl\n" \
			"	popl %%eax\n" \
			"	popl %0\n" \
			"	shl $2,%0\n" \
			"	jmp 1b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,2b\n" \
			".previous" \
			: "=c"(size), "=&S" (__d0), "=&D" (__d1) \
			: "1"(from), "2"(to), "0"(size/4) \
			: "memory"); \
		break; \
	case 1: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:	movsb\n" \
			"2:\n" \
			".section .fixup,\"ax\"\n" \
			"3:	pushl %0\n" \
			"	pushl %%eax\n" \
			"	xorl %%eax,%%eax\n" \
			"	rep; stosl\n" \
			"	stosb\n" \
			"	popl %%eax\n" \
			"	popl %0\n" \
			"	shl $2,%0\n" \
			"	incl %0\n" \
			"	jmp 2b\n" \
			"4:	pushl %%eax\n" \
			"	xorl %%eax,%%eax\n" \
			"	stosb\n" \
			"	popl %%eax\n" \
			"	incl %0\n" \
			"	jmp 2b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,3b\n" \
			"	.long 1b,4b\n" \
			".previous" \
			: "=c"(size), "=&S" (__d0), "=&D" (__d1) \
			: "1"(from), "2"(to), "0"(size/4) \
			: "memory"); \
		break; \
	case 2: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:	movsw\n" \
			"2:\n" \
			".section .fixup,\"ax\"\n" \
			"3:	pushl %0\n" \
			"	pushl %%eax\n" \
			"	xorl %%eax,%%eax\n" \
			"	rep; stosl\n" \
			"	stosw\n" \
			"	popl %%eax\n" \
			"	popl %0\n" \
			"	shl $2,%0\n" \
			"	addl $2,%0\n" \
			"	jmp 2b\n" \
			"4:	pushl %%eax\n" \
			"	xorl %%eax,%%eax\n" \
			"	stosw\n" \
			"	popl %%eax\n" \
			"	addl $2,%0\n" \
			"	jmp 2b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,3b\n" \
			"	.long 1b,4b\n" \
			".previous" \
			: "=c"(size), "=&S" (__d0), "=&D" (__d1) \
			: "1"(from), "2"(to), "0"(size/4) \
			: "memory"); \
		break; \
	case 3: \
		__asm__ __volatile__( \
			"0:	rep; movsl\n" \
			"1:	movsw\n" \
			"2:	movsb\n" \
			"3:\n" \
			".section .fixup,\"ax\"\n" \
			"4:	pushl %0\n" \
			"	pushl %%eax\n" \
			"	xorl %%eax,%%eax\n" \
			"	rep; stosl\n" \
			"	stosw\n" \
			"	stosb\n" \
			"	popl %%eax\n" \
			"	popl %0\n" \
			"	shl $2,%0\n" \
			"	addl $3,%0\n" \
			"	jmp 2b\n" \
			"5:	pushl %%eax\n" \
			"	xorl %%eax,%%eax\n" \
			"	stosw\n" \
			"	stosb\n" \
			"	popl %%eax\n" \
			"	addl $3,%0\n" \
			"	jmp 2b\n" \
			"6:	pushl %%eax\n" \
			"	xorl %%eax,%%eax\n" \
			"	stosb\n" \
			"	popl %%eax\n" \
			"	incl %0\n" \
			"	jmp 3b\n" \
			".previous\n" \
			".section __ex_table,\"a\"\n" \
			"	.align 4\n" \
			"	.long 0b,4b\n" \
			"	.long 1b,5b\n" \
			"	.long 2b,6b\n" \
			".previous" \
			: "=c"(size), "=&S" (__d0), "=&D" (__d1) \
			: "1"(from), "2"(to), "0"(size/4) \
			: "memory"); \
		break; \
	} \
} while (0)
unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void *, unsigned long);
static inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to,from,n);
	return n;
}
static inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user_zeroing(to,from,n);
	return n;
}
static inline unsigned long
__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	__constant_copy_user(to,from,n);
	return n;
}
static inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	__constant_copy_user_zeroing(to,from,n);
	return n;
}
#define copy_to_user(to,from,n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_to_user((to),(from),(n)) : \
	 __generic_copy_to_user((to),(from),(n)))

#define copy_from_user(to,from,n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_from_user((to),(from),(n)) : \
	 __generic_copy_from_user((to),(from),(n)))

#define __copy_to_user(to,from,n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_to_user_nocheck((to),(from),(n)) : \
	 __generic_copy_to_user_nocheck((to),(from),(n)))

#define __copy_from_user(to,from,n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_from_user_nocheck((to),(from),(n)) : \
	 __generic_copy_from_user_nocheck((to),(from),(n)))
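/*
 * Usage sketch (hedged): both copy directions return the number of bytes
 * that could NOT be copied, so zero means complete success.
 * sample_read_sketch() and its 64-byte buffer are hypothetical.
 */
#if 0
static long sample_read_sketch(char *ubuf, unsigned long count)
{
	char kbuf[64];

	if (count > sizeof(kbuf))
		count = sizeof(kbuf);
	/* ... fill kbuf, then hand it to user space ... */
	if (copy_to_user(ubuf, kbuf, count))
		return -EFAULT;		/* partial copy: report a fault */
	return count;
}
#endif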
long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
long strnlen_user(const char *str, long n);
unsigned long clear_user(void *mem, unsigned long len);
unsigned long __clear_user(void *mem, unsigned long len);
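/*
 * Usage sketch (hedged) for the string helpers: strncpy_from_user()
 * returns the number of bytes copied, or -EFAULT on a bad user pointer,
 * and does not guarantee termination when it truncates.
 * fetch_name_sketch() and its 64-byte buffer are hypothetical.
 */
#if 0
static int fetch_name_sketch(char *kname, const char *uname)
{
	long len = strncpy_from_user(kname, uname, 63);

	if (len < 0)
		return -EFAULT;		/* faulted while reading user memory */
	kname[len] = '\0';		/* ensure termination on truncation */
	return 0;
}
#endif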
#endif /* __i386_UACCESS_H */