/*
 * include/asm-x86/uaccess_32.h
 *
 * 32-bit x86 user space memory access functions.
 * (From the series "x86: merge common parts of uaccess".)
 */
1 #ifndef __i386_UACCESS_H
2 #define __i386_UACCESS_H
4 /*
5 * User space memory access functions
6 */
7 #include <linux/errno.h>
8 #include <linux/thread_info.h>
9 #include <linux/prefetch.h>
10 #include <linux/string.h>
11 #include <asm/asm.h>
12 #include <asm/page.h>
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
/* Alignment mask consulted by the Intel-optimized user-copy routines. */
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif
/*
 * Is @addr strictly below the current thread's address limit?
 * NOTE(review): this checks a single address only, not a range --
 * callers needing a length-aware check must use access_ok().
 */
#define __addr_ok(addr)                                 \
        ((unsigned long __force)(addr) <                \
         (current_thread_info()->addr_limit.seg))
/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)                                                \
({                                                                      \
        int __ret_gu;                                                   \
        unsigned long __val_gu;                                         \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __get_user_x(1, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 2:                                                         \
                __get_user_x(2, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 4:                                                         \
                __get_user_x(4, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        default:                                                        \
                /* unsupported size: "X" presumably resolves to a       \
                 * nonexistent helper so the build fails -- confirm     \
                 * against __get_user_x in the common header */         \
                __get_user_x(X, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        }                                                               \
        /* cast through the pointee type so sign extension is right */  \
        (x) = (__typeof__(*(ptr)))__val_gu;                             \
        __ret_gu;                                                       \
})
extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax, no clobbers.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/* Call the size-specific __put_user_N helper declared above. */
#define __put_user_x(size, x, ptr)                              \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

/* 64-bit store: the value occupies %eax:%edx (the "A" constraint). */
#define __put_user_8(x, ptr)                                    \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#ifdef CONFIG_X86_WP_WORKS_OK

#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
        __pu_val = x;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_val, ptr);                 \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_val, ptr);                 \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_val, ptr);                 \
                break;                                          \
        case 8:                                                 \
                __put_user_8(__pu_val, ptr);                    \
                break;                                          \
        default:                                                \
                /* expands to "call __put_user_X": undefined    \
                 * symbol, so unsupported sizes fail to link */ \
                __put_user_x(X, __pu_val, ptr);                 \
                break;                                          \
        }                                                       \
        __ret_pu;                                               \
})

#else
/*
 * Fallback for CPUs where the WP bit does not fault supervisor-mode
 * writes: route through __copy_to_user_ll(), which handles the fault.
 */
#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr))__pus_tmp = x;                        \
        __ret_pu = 0;                                           \
        if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,         \
                                sizeof(*(ptr))) != 0))          \
                __ret_pu = -EFAULT;                             \
        __ret_pu;                                               \
})

#endif
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
/* Store @x at user pointer @ptr without an access_ok() check;
 * evaluates to 0 on success or -EFAULT on fault. */
#define __put_user_nocheck(x, ptr, size)                                \
({                                                                      \
        long __pu_err;                                                  \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT);         \
        __pu_err;                                                       \
})
/*
 * Store a 64-bit value as two 32-bit movl's from %eax:%edx.  If either
 * store faults, the exception table routes to label 4, which writes the
 * -EFAULT constant into @err and resumes after the store sequence.
 */
#define __put_user_u64(x, addr, err)                                    \
        asm volatile("1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
                     "3:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (err)                                       \
                     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
#ifdef CONFIG_X86_WP_WORKS_OK

/* Dispatch a user-space store of @size bytes to the matching asm
 * helper; sets @retval to 0 on success or @errret on fault. */
#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "", "ir", errret);  \
                break;                                                  \
        case 8:                                                         \
                __put_user_u64((__typeof__(*ptr))(x), ptr, retval);     \
                break;                                                  \
        default:                                                        \
                /* undefined extern: unsupported sizes fail to link */  \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#else

/* WP-broken fallback: always go through __copy_to_user_ll(). */
#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        __typeof__(*(ptr))__pus_tmp = x;                                \
        retval = 0;                                                     \
                                                                        \
        if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))    \
                retval = errret;                                        \
} while (0)

#endif
/* Dummy type used so "m" constraints below cover a wide memory range
 * rather than a single scalar object. */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        movl %3,%0\n"                           \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
/* Load @size bytes from user pointer @ptr into @x without an
 * access_ok() check; evaluates to 0 on success or -EFAULT on fault. */
#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        long __gu_err;                                                  \
        unsigned long __gu_val;                                         \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
        __gu_err;                                                       \
})
/* Dispatch a user-space load of @size bytes to the matching asm
 * helper; sets @retval to 0 on success or @errret on fault. */
#define __get_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "", "=r", errret);  \
                break;                                                  \
        default:                                                        \
                /* undefined extern: unsupported sizes fail to link */  \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)
/*
 * Single-instruction user load with exception-table fixup: on a fault
 * the fixup at label 3 writes @errret into @err and zeroes the
 * destination register before resuming at label 2.
 */
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        movl %3,%0\n"                           \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype (x)                            \
                     : "m" (__m(addr)), "i" (errret), "0" (err))
/* Out-of-line copy primitives; each returns the number of bytes NOT
 * copied (0 on full success).  Definitions live elsewhere in the arch
 * code.  The _nozero variants skip zero-padding the destination tail
 * on a fault; the _nocache variants avoid polluting the CPU cache. */
unsigned long __must_check __copy_to_user_ll
                (void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
                (void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
                (void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
                (void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
                (void *to, const void __user *from, unsigned long n);
/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure he pins the user space address
 * so that we don't result in page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                /* errret is the request size, giving "n bytes uncopied" */
                switch (n) {
                case 1:
                        __put_user_size(*(u8 *)from, (u8 __user *)to,
                                        1, ret, 1);
                        return ret;
                case 2:
                        __put_user_size(*(u16 *)from, (u16 __user *)to,
                                        2, ret, 2);
                        return ret;
                case 4:
                        __put_user_size(*(u32 *)from, (u32 __user *)to,
                                        4, ret, 4);
                        return ret;
                }
        }
        return __copy_to_user_ll(to, from, n);
}
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_sleep();
        return __copy_to_user_inatomic(to, from, n);
}
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
        /* Avoid zeroing the tail if the copy fails..
         * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
         * but as the zeroing behaviour is only significant when n is not
         * constant, that shouldn't be a problem.
         */
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                /* errret == n, so a fault reports all n bytes uncopied */
                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        return ret;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        return ret;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        return ret;
                }
        }
        return __copy_from_user_ll_nozero(to, from, n);
}
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_sleep();
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        return ret;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        return ret;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        return ret;
                }
        }
        return __copy_from_user_ll(to, from, n);
}
#define ARCH_HAS_NOCACHE_UACCESS

/* Like __copy_from_user(), but the out-of-line path uses non-caching
 * stores; small constant sizes still take the cached fast path. */
static __always_inline unsigned long __copy_from_user_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        might_sleep();
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        return ret;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        return ret;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        return ret;
                }
        }
        return __copy_from_user_ll_nocache(to, from, n);
}
/* Atomic-context variant: no sleep, no zero-padding of the tail on a
 * fault, non-caching out-of-line copy. */
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
                                  unsigned long n)
{
        return __copy_from_user_ll_nocache_nozero(to, from, n);
}
/* Fully-checked copy routines and bounded user-string copy; the copy
 * routines return the number of bytes that could not be copied. */
unsigned long __must_check copy_to_user(void __user *to,
                                        const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
                                          unsigned long n);
long __must_check strncpy_from_user(char *dst, const char __user *src,
                                    long count);
long __must_check __strncpy_from_user(char *dst,
                                      const char __user *src, long count);
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, LONG_MAX)

long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
519 #endif /* __i386_UACCESS_H */