GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] release/src-rt-6.x.4708/linux/linux-2.6.36/arch/mips/include/asm/uaccess.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#define __UA_LIMIT	0x80000000UL

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT

#define __UA_LIMIT	(- TASK_SIZE)

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */
/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)
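
/*
 * Example (editor's sketch, not part of the original header): the classic
 * pattern for temporarily lifting the address-limit check so a helper that
 * takes a __user pointer can operate on a kernel buffer. "some_helper" and
 * "kbuf" are hypothetical names used only for illustration.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = some_helper((void __user *)kbuf, len);
 *	set_fs(old_fs);
 */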
/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size) \
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask) \
({ \
	unsigned long __addr = (unsigned long) (addr); \
	unsigned long __size = size; \
	unsigned long __mask = mask; \
	unsigned long __ok; \
	\
	__chk_user_ptr(addr); \
	__ok = (signed long)(__mask & (__addr | (__addr + __size) | \
		__ua_size(__size))); \
	__ok == 0; \
})

#define access_ok(type, addr, size) \
	likely(__access_ok((addr), (size), __access_mask))
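
/*
 * Example (editor's sketch, not part of the original header): validate a
 * user range once up front; "arg" and "len" are hypothetical names.
 *
 *	if (!access_ok(VERIFY_WRITE, (void __user *)arg, len))
 *		return -EFAULT;
 */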
/*
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
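
/*
 * Example (editor's sketch, not part of the original header): round-trip
 * a single int through user space with full checking; "uptr" is a
 * hypothetical int __user * argument.
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */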
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
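
/*
 * Example (editor's sketch, not part of the original header): when the
 * same range is touched repeatedly, check it once with access_ok() and
 * use the unchecked variants inside the loop; "uarray", "n" and "sum"
 * are hypothetical.
 *
 *	if (!access_ok(VERIFY_READ, uarray, n * sizeof(*uarray)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++) {
 *		u32 tmp;
 *
 *		if (__get_user(tmp, &uarray[i]))
 *			return -EFAULT;
 *		sum += tmp;
 *	}
 */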
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_user_asm(val, "lb", ptr); break; \
	case 2: __get_user_asm(val, "lh", ptr); break; \
	case 4: __get_user_asm(val, "lw", ptr); break; \
	case 8: __GET_USER_DW(val, ptr); break; \
	default: __get_user_unknown(); break; \
	} \
} while (0)
#define __get_user_nocheck(x, ptr, size) \
({ \
	int __gu_err; \
	\
	__chk_user_ptr(ptr); \
	__get_user_common((x), size, ptr); \
	__gu_err; \
})

#define __get_user_check(x, ptr, size) \
({ \
	int __gu_err = -EFAULT; \
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
	\
	might_fault(); \
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
		__get_user_common((x), size, __gu_ptr); \
	\
	__gu_err; \
})
#define __get_user_asm(val, insn, addr) \
{ \
	long __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1: " insn " %1, %3 \n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" "__UA_ADDR "\t1b, 3b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=r" (__gu_tmp) \
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
	\
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}
/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_user_asm_ll32(val, addr) \
{ \
	union { \
		unsigned long long l; \
		__typeof__(*(addr)) t; \
	} __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1: lw %1, (%3) \n" \
	"2: lw %D1, 4(%3) \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" move %1, $0 \n" \
	" move %D1, $0 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=&r" (__gu_tmp.l) \
	: "0" (0), "r" (addr), "i" (-EFAULT)); \
	\
	(val) = __gu_tmp.t; \
}
/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
#endif

#define __put_user_nocheck(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __pu_val; \
	int __pu_err = 0; \
	\
	__chk_user_ptr(ptr); \
	__pu_val = (x); \
	switch (size) { \
	case 1: __put_user_asm("sb", ptr); break; \
	case 2: __put_user_asm("sh", ptr); break; \
	case 4: __put_user_asm("sw", ptr); break; \
	case 8: __PUT_USER_DW(ptr); break; \
	default: __put_user_unknown(); break; \
	} \
	__pu_err; \
})
#define __put_user_check(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	int __pu_err = -EFAULT; \
	\
	might_fault(); \
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
		switch (size) { \
		case 1: __put_user_asm("sb", __pu_addr); break; \
		case 2: __put_user_asm("sh", __pu_addr); break; \
		case 4: __put_user_asm("sw", __pu_addr); break; \
		case 8: __PUT_USER_DW(__pu_addr); break; \
		default: __put_user_unknown(); break; \
		} \
	} \
	__pu_err; \
})
#define __put_user_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1: " insn " %z2, %3 # __put_user_asm\n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 3b \n" \
	" .previous \n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	  "i" (-EFAULT)); \
}
#define __put_user_asm_ll32(ptr) \
{ \
	__asm__ __volatile__( \
	"1: sw %2, (%3) # __put_user_asm_ll32 \n" \
	"2: sw %D2, 4(%3) \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" .previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	  "i" (-EFAULT)); \
}

extern void __put_user_unknown(void);
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr) \
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
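
/*
 * Example (editor's sketch, not part of the original header): fetching a
 * 32-bit field from a packed, possibly misaligned user structure; "uhdr"
 * and its "seq" member are hypothetical.
 *
 *	u32 seq;
 *
 *	if (get_user_unaligned(seq, &uhdr->seq))
 *		return -EFAULT;
 */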
/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_user_asm(val, "lb", ptr); break; \
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
	default: __get_user_unaligned_unknown(); break; \
	} \
} while (0)
#define __get_user_unaligned_nocheck(x,ptr,size) \
({ \
	int __gu_err; \
	\
	__get_user_unaligned_common((x), size, ptr); \
	__gu_err; \
})

#define __get_user_unaligned_check(x,ptr,size) \
({ \
	int __gu_err = -EFAULT; \
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
	\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
		__get_user_unaligned_common((x), size, __gu_ptr); \
	\
	__gu_err; \
})
#define __get_user_unaligned_asm(val, insn, addr) \
{ \
	long __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1: " insn " %1, %3 \n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" "__UA_ADDR "\t1b, 3b \n" \
	" "__UA_ADDR "\t1b + 4, 3b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=r" (__gu_tmp) \
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
	\
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}
/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr) \
{ \
	unsigned long long __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1: ulw %1, (%3) \n" \
	"2: ulw %D1, 4(%3) \n" \
	" move %0, $0 \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" move %1, $0 \n" \
	" move %D1, $0 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 1b + 4, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" " __UA_ADDR " 2b + 4, 4b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=&r" (__gu_tmp) \
	: "0" (0), "r" (addr), "i" (-EFAULT)); \
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}
/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_nocheck(x,ptr,size) \
({ \
	__typeof__(*(ptr)) __pu_val; \
	int __pu_err = 0; \
	\
	__pu_val = (x); \
	switch (size) { \
	case 1: __put_user_asm("sb", ptr); break; \
	case 2: __put_user_unaligned_asm("ush", ptr); break; \
	case 4: __put_user_unaligned_asm("usw", ptr); break; \
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
	default: __put_user_unaligned_unknown(); break; \
	} \
	__pu_err; \
})
#define __put_user_unaligned_check(x,ptr,size) \
({ \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	int __pu_err = -EFAULT; \
	\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
		switch (size) { \
		case 1: __put_user_asm("sb", __pu_addr); break; \
		case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
		case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
		case 8: __PUT_USER_UNALIGNED_DW(__pu_addr); break; \
		default: __put_user_unaligned_unknown(); break; \
		} \
	} \
	__pu_err; \
})
#define __put_user_unaligned_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 3b \n" \
	" .previous \n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	  "i" (-EFAULT)); \
}
#define __put_user_unaligned_asm_ll32(ptr) \
{ \
	__asm__ __volatile__( \
	"1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
	"2: sw %D2, 4(%3) \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 1b + 4, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" " __UA_ADDR " 2b + 4, 4b \n" \
	" .previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	  "i" (-EFAULT)); \
}

extern void __put_user_unaligned_unknown(void);
/*
 * We're generating jumps to subroutines which will be outside the range
 * of the jump instructions.
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
	".set\tnoat\n\t" \
	__UA_LA "\t$1, " #destination "\n\t" \
	"jalr\t$1\n\t" \
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
	"jal\t" #destination "\n\t"
#endif

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif
extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to, from, n) \
({ \
	register void __user *__cu_to_r __asm__("$4"); \
	register const void *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
	\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	__MODULE_JAL(__copy_user) \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})
/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
	__cu_len; \
})
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
	__cu_len; \
})

#define __copy_from_user_inatomic(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
						    __cu_len); \
	__cu_len; \
})
/*
 * copy_to_user: - Copy a block of data into user space.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
		might_fault(); \
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
						 __cu_len); \
	} \
	__cu_len; \
})
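
/*
 * Example (editor's sketch, not part of the original header): the usual
 * tail of a read()-style handler; "buf", "kbuf" and "count" are
 * hypothetical.
 *
 *	if (copy_to_user(buf, kbuf, count))
 *		return -EFAULT;
 *	return count;
 */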
#define __invoke_copy_from_user(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
	\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})
#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
	\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user_inatomic) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})
/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
					   __cu_len); \
	__cu_len; \
})
/*
 * copy_from_user: - Copy a block of data from user space.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
		might_fault(); \
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
						   __cu_len); \
	} \
	__cu_len; \
})
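
/*
 * Example (editor's sketch, not part of the original header): the usual
 * head of a write()-style handler; "kbuf", "buf" and "count" are
 * hypothetical.
 *
 *	if (copy_from_user(kbuf, buf, count))
 *		return -EFAULT;
 */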
#define __copy_in_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
					   __cu_len); \
	__cu_len; \
})

#define copy_in_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
		   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
		might_fault(); \
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
						   __cu_len); \
	} \
	__cu_len; \
})
/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

#define clear_user(addr,n) \
({ \
	void __user * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && access_ok(VERIFY_WRITE, \
				   __cl_addr, __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; \
})
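
/*
 * Example (editor's sketch, not part of the original header): zeroing the
 * uncopied tail of a user buffer; "ubuf", "len" and "copied" are
 * hypothetical.
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */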
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst: Destination address, in kernel space. This buffer must be at
 *       least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst: Destination address, in kernel space. This buffer must be at
 *       least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
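
/*
 * Example (editor's sketch, not part of the original header): fetching a
 * name from user space; "uname" is a hypothetical const char __user *.
 * Per the semantics above, a return equal to the buffer size means the
 * buffer was filled and the string may be unterminated.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */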
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}
/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @str: The string to measure.
 * @n: The maximum number of bytes to scan.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
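
/*
 * Example (editor's sketch, not part of the original header): sizing a
 * user string before copying it; "ustr" and "max" are hypothetical.
 *
 *	long n = strnlen_user(ustr, max);
 *
 *	if (n == 0)
 *		return -EFAULT;
 */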
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);

#endif /* _ASM_UACCESS_H */