/* include/asm-i386/uaccess.h -- 2.1.116pre2 import (davej-history.git) */

#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <asm/page.h>

#ifdef __SMP__
extern void __check_locks(unsigned int);
#else
#define __check_locks(x) do { } while (0)
#endif

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS   MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()  (KERNEL_DS)
#define get_fs()  (current->addr_limit)
#define set_fs(x) (current->addr_limit = (x))

#define segment_eq(a,b) ((a).seg == (b).seg)

extern int __verify_write(const void *, unsigned long);

#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))

/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 */
#define __range_ok(addr,size) ({ \
        unsigned long flag,sum; \
        asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
                :"=&r" (flag), "=r" (sum) \
                :"1" (addr),"g" (size),"g" (current->addr_limit.seg)); \
        flag; })

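/*
 * Rough C equivalent of the check above (a sketch, ignoring the carry
 * trick): the result is non-zero when the range is unacceptable, i.e.
 * when addr+size wraps past 4GB or ends above current->addr_limit.seg.
 *
 *      unsigned long sum = (unsigned long)(addr) + (size);
 *      int bad = (sum < (unsigned long)(addr)) ||
 *                (sum > current->addr_limit.seg);
 */
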
#if CPU > 386

#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)

#else

#define access_ok(type,addr,size) ( (__range_ok(addr,size) == 0) && \
        ((type) == VERIFY_READ || boot_cpu_data.wp_works_ok || \
         segment_eq(get_fs(),KERNEL_DS) || \
         __verify_write((void *)(addr),(size))))

#endif /* CPU */

extern inline int verify_area(int type, const void * addr, unsigned long size)
{
        return access_ok(type,addr,size) ? 0 : -EFAULT;
}

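/*
 * Typical use (a sketch; "arg" and the int-sized object are hypothetical):
 * validate the whole user range once, then use the unchecked __put_user()
 * and __get_user() variants, defined below, on addresses inside it.
 *
 *      int err = verify_area(VERIFY_WRITE, (int *)arg, sizeof(int));
 *      if (err)
 *              return err;
 *      __put_user(0, (int *)arg);
 */
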
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);

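/*
 * How the table is consulted (a simplified sketch of the fault handler,
 * not code from this header): on a fault in kernel mode, the handler looks
 * up the faulting EIP and, if a fixup address exists, resumes execution
 * there instead of oopsing.
 *
 *      unsigned long fixup = search_exception_table(regs->eip);
 *      if (fixup) {
 *              regs->eip = fixup;
 *              return;
 *      }
 */
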
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);

#define __get_user_x(size,ret,x,ptr) \
        __asm__ __volatile__("call __get_user_" #size \
                :"=a" (ret),"=d" (x) \
                :"0" (ptr))

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr) \
({      int __ret_gu,__val_gu; \
        __check_locks(1); \
        switch(sizeof (*(ptr))) { \
        case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
        case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
        case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
        default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \
        } \
        (x) = (__typeof__(*(ptr)))__val_gu; \
        __ret_gu; \
})

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);

extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr) \
        __asm__ __volatile__("call __put_user_" #size \
                :"=a" (ret) \
                :"0" (ptr),"d" (x) \
                :"cx")

#define put_user(x,ptr) \
({      int __ret_pu; \
        __check_locks(1); \
        switch(sizeof (*(ptr))) { \
        case 1:  __put_user_x(1,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
        case 2:  __put_user_x(2,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
        case 4:  __put_user_x(4,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
        default: __put_user_x(X,__ret_pu,x,ptr); break; \
        } \
        __ret_pu; \
})

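/*
 * Example use in an ioctl handler (a sketch; example_ioctl and its argument
 * layout are hypothetical).  Both macros check the user address themselves
 * and evaluate to 0 on success or -EFAULT on a fault:
 *
 *      static int example_ioctl(struct inode *inode, struct file *file,
 *                               unsigned int cmd, unsigned long arg)
 *      {
 *              int val;
 *
 *              if (get_user(val, (int *)arg))
 *                      return -EFAULT;
 *              val++;
 *              return put_user(val, (int *)arg);
 *      }
 */
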
#define __get_user(x,ptr) \
        __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __put_user_nocheck(x,ptr,size) \
({ \
        long __pu_err; \
        __check_locks(1); \
        __put_user_size((x),(ptr),(size),__pu_err); \
        __pu_err; \
})

#define __put_user_size(x,ptr,size,retval) \
do { \
        retval = 0; \
        switch (size) { \
        case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break; \
        case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break; \
        case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break; \
        default: __put_user_bad(); \
        } \
} while (0)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype) \
        __asm__ __volatile__( \
                "1: mov"itype" %"rtype"1,%2\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "3: movl %3,%0\n" \
                "   jmp 2b\n" \
                ".previous\n" \
                ".section __ex_table,\"a\"\n" \
                "   .align 4\n" \
                "   .long 1b,3b\n" \
                ".previous" \
                : "=r"(err) \
                : ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))

#define __get_user_nocheck(x,ptr,size) \
({ \
        long __gu_err, __gu_val; \
        __check_locks(1); \
        __get_user_size(__gu_val,(ptr),(size),__gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val; \
        __gu_err; \
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval) \
do { \
        retval = 0; \
        switch (size) { \
        case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break; \
        case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break; \
        case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break; \
        default: (x) = __get_user_bad(); \
        } \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype) \
        __asm__ __volatile__( \
                "1: mov"itype" %2,%"rtype"1\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "3: movl %3,%0\n" \
                "   xor"itype" %"rtype"1,%"rtype"1\n" \
                "   jmp 2b\n" \
                ".previous\n" \
                ".section __ex_table,\"a\"\n" \
                "   .align 4\n" \
                "   .long 1b,3b\n" \
                ".previous" \
                : "=r"(err), ltype (x) \
                : "m"(__m(addr)), "i"(-EFAULT), "0"(err))

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  They cover the common case where
 * the right response to an error is simply to return from the function
 * in which xxx_ret is used.
 */

#define put_user_ret(x,ptr,ret) ({ if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ if (get_user(x,ptr)) return ret; })

#define __put_user_ret(x,ptr,ret) ({ if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ if (__get_user(x,ptr)) return ret; })

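/*
 * For example (a sketch), inside a system call or ioctl handler
 *
 *      put_user_ret(val, (int *)arg, -EFAULT);
 *
 * behaves like
 *
 *      if (put_user(val, (int *)arg))
 *              return -EFAULT;
 *
 * so the enclosing function returns -EFAULT if the store faults.
 */
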
/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy.  */
#define __copy_user(to,from,size) \
        __asm__ __volatile__( \
                "0: rep; movsl\n" \
                "   movl %1,%0\n" \
                "1: rep; movsb\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "3: lea 0(%1,%0,4),%0\n" \
                "   jmp 2b\n" \
                ".previous\n" \
                ".section __ex_table,\"a\"\n" \
                "   .align 4\n" \
                "   .long 0b,3b\n" \
                "   .long 1b,2b\n" \
                ".previous" \
                : "=&c"(size) \
                : "r"(size & 3), "0"(size / 4), "D"(to), "S"(from) \
                : "di", "si", "memory")

/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
        __check_locks(1);
        __copy_user(to,from,n);
        return n;
}

static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
        __check_locks(1);
        __copy_user(to,from,n);
        return n;
}

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \
        switch (size & 3) { \
        default: \
                __asm__ __volatile__( \
                        "0: rep; movsl\n" \
                        "1:\n" \
                        ".section .fixup,\"ax\"\n" \
                        "2: shl $2,%0\n" \
                        "   jmp 1b\n" \
                        ".previous\n" \
                        ".section __ex_table,\"a\"\n" \
                        "   .align 4\n" \
                        "   .long 0b,2b\n" \
                        ".previous" \
                        : "=c"(size) \
                        : "S"(from), "D"(to), "0"(size/4) \
                        : "di", "si", "memory"); \
                break; \
        case 1: \
                __asm__ __volatile__( \
                        "0: rep; movsl\n" \
                        "1: movsb\n" \
                        "2:\n" \
                        ".section .fixup,\"ax\"\n" \
                        "3: shl $2,%0\n" \
                        "4: incl %0\n" \
                        "   jmp 2b\n" \
                        ".previous\n" \
                        ".section __ex_table,\"a\"\n" \
                        "   .align 4\n" \
                        "   .long 0b,3b\n" \
                        "   .long 1b,4b\n" \
                        ".previous" \
                        : "=c"(size) \
                        : "S"(from), "D"(to), "0"(size/4) \
                        : "di", "si", "memory"); \
                break; \
        case 2: \
                __asm__ __volatile__( \
                        "0: rep; movsl\n" \
                        "1: movsw\n" \
                        "2:\n" \
                        ".section .fixup,\"ax\"\n" \
                        "3: shl $2,%0\n" \
                        "4: addl $2,%0\n" \
                        "   jmp 2b\n" \
                        ".previous\n" \
                        ".section __ex_table,\"a\"\n" \
                        "   .align 4\n" \
                        "   .long 0b,3b\n" \
                        "   .long 1b,4b\n" \
                        ".previous" \
                        : "=c"(size) \
                        : "S"(from), "D"(to), "0"(size/4) \
                        : "di", "si", "memory"); \
                break; \
        case 3: \
                __asm__ __volatile__( \
                        "0: rep; movsl\n" \
                        "1: movsw\n" \
                        "2: movsb\n" \
                        "3:\n" \
                        ".section .fixup,\"ax\"\n" \
                        "4: shl $2,%0\n" \
                        "5: addl $2,%0\n" \
                        "6: incl %0\n" \
                        "   jmp 3b\n" \
                        ".previous\n" \
                        ".section __ex_table,\"a\"\n" \
                        "   .align 4\n" \
                        "   .long 0b,4b\n" \
                        "   .long 1b,5b\n" \
                        "   .long 2b,6b\n" \
                        ".previous" \
                        : "=c"(size) \
                        : "S"(from), "D"(to), "0"(size/4) \
                        : "di", "si", "memory"); \
                break; \
        } \
} while (0)

unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void *, unsigned long);

static inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
        __check_locks(1);
        if (access_ok(VERIFY_WRITE, to, n))
                __constant_copy_user(to,from,n);
        return n;
}

static inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
        __check_locks(1);
        if (access_ok(VERIFY_READ, from, n))
                __constant_copy_user(to,from,n);
        return n;
}

static inline unsigned long
__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
        __check_locks(1);
        __constant_copy_user(to,from,n);
        return n;
}

static inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
        __check_locks(1);
        __constant_copy_user(to,from,n);
        return n;
}

#define copy_to_user(to,from,n) \
        (__builtin_constant_p(n) ? \
         __constant_copy_to_user((to),(from),(n)) : \
         __generic_copy_to_user((to),(from),(n)))

#define copy_from_user(to,from,n) \
        (__builtin_constant_p(n) ? \
         __constant_copy_from_user((to),(from),(n)) : \
         __generic_copy_from_user((to),(from),(n)))

#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })

#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })

#define __copy_to_user(to,from,n) \
        (__builtin_constant_p(n) ? \
         __constant_copy_to_user_nocheck((to),(from),(n)) : \
         __generic_copy_to_user_nocheck((to),(from),(n)))

#define __copy_from_user(to,from,n) \
        (__builtin_constant_p(n) ? \
         __constant_copy_from_user_nocheck((to),(from),(n)) : \
         __generic_copy_from_user_nocheck((to),(from),(n)))

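/*
 * Example (a sketch; struct example_args and "arg" are hypothetical): the
 * checking variants return the number of bytes that could not be copied,
 * so a non-zero result means part of the user range was inaccessible.
 *
 *      struct example_args args;
 *
 *      if (copy_from_user(&args, (void *)arg, sizeof(args)))
 *              return -EFAULT;
 *      args.result = 0;
 *      if (copy_to_user((void *)arg, &args, sizeof(args)))
 *              return -EFAULT;
 */
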
long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
long strlen_user(const char *str);
unsigned long clear_user(void *mem, unsigned long len);
unsigned long __clear_user(void *mem, unsigned long len);

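/*
 * Example (a sketch; "uname" and the buffer size are hypothetical):
 * strncpy_from_user() copies at most "count" bytes and returns the length
 * of the resulting string, or -EFAULT if the source was inaccessible.
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name));
 *      if (len < 0)
 *              return len;
 */
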
#endif /* __i386_UACCESS_H */