/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm-generic/uaccess.h>
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_MIPS32

#define __UA_LIMIT	0x80000000UL

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_MIPS32 */
#ifdef CONFIG_MIPS64

#define __UA_LIMIT	(- TASK_SIZE)

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_MIPS64 */
/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })
#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)					\
	(((signed long)((mask) & ((addr) | ((addr) + (size)) | __ua_size(size)))) == 0)

#define access_ok(type, addr, size)					\
	likely(__access_ok((unsigned long)(addr), (size), __access_mask))
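/*
 * Worked example (illustrative sketch): on a 32-bit kernel __UA_LIMIT is
 * 0x80000000, so under USER_DS the mask is the sign bit.  A hypothetical
 * check such as
 *
 *	access_ok(VERIFY_READ, (void *) 0x7ffff000, 0x2000)
 *
 * fails because 0x7ffff000 + 0x2000 = 0x80001000 has the high bit set,
 * while the same check on 0x7ffc0000 with size 0x2000 succeeds
 * (0x7ffc0000 | 0x7ffc2000 has no high bit).  Under KERNEL_DS the mask
 * is 0, so every address passes.
 */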
/*
 * verify_area: - Obsolete/deprecated and will go away soon,
 * use access_ok() instead.
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * This function has been replaced by access_ok().
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns zero if the memory block may be valid, -EFAULT
 * if it is definitely invalid.
 *
 * See access_ok() for more details.
 */
static inline int __deprecated verify_area(int type, const void *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}
/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr)							\
	__put_user_check((x),(ptr),sizeof(*(ptr)))
/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr)							\
	__get_user_check((x),(ptr),sizeof(*(ptr)))
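/*
 * Example (illustrative sketch, hypothetical driver code): get_user() and
 * put_user() are meant for single scalars passed by pointer, e.g. in an
 * ioctl() handler:
 *
 *	static int example_ioctl_bump_counter(int __user *argp)
 *	{
 *		int val;
 *
 *		if (get_user(val, argp))
 *			return -EFAULT;
 *		val += 1;
 *		return put_user(val, argp);
 *	}
 *
 * Both macros return 0 on success and -EFAULT on a faulting access.
 */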
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr)						\
	__put_user_nocheck((x),(ptr),sizeof(*(ptr)))
/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr)						\
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
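/*
 * Example (illustrative sketch): the double-underscore variants skip the
 * access_ok() check, so the caller performs it once up front, e.g. when
 * reading two adjacent members of a hypothetical user structure:
 *
 *	struct example_pair { int a; int b; };
 *
 *	static int example_read_pair(struct example_pair __user *up,
 *	                             int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, up, sizeof(*up)))
 *			return -EFAULT;
 *		if (__get_user(*a, &up->a) || __get_user(*b, &up->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */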
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef __mips64
#define __GET_USER_DW(__gu_err) __get_user_asm("ld", __gu_err)
#else
#define __GET_USER_DW(__gu_err) __get_user_asm_ll32(__gu_err)
#endif
#define __get_user_nocheck(x,ptr,size)					\
({									\
	__typeof(*(ptr)) __gu_val = 0;					\
	long __gu_addr;							\
	long __gu_err = 0;						\
									\
	might_sleep();							\
	__gu_addr = (long) (ptr);					\
	switch (size) {							\
	case 1: __get_user_asm("lb", __gu_err); break;			\
	case 2: __get_user_asm("lh", __gu_err); break;			\
	case 4: __get_user_asm("lw", __gu_err); break;			\
	case 8: __GET_USER_DW(__gu_err); break;				\
	default: __get_user_unknown(); break;				\
	}								\
	x = (__typeof__(*(ptr))) __gu_val;				\
	__gu_err;							\
})
#define __get_user_check(x,ptr,size)					\
({									\
	__typeof__(*(ptr)) __gu_val = 0;				\
	long __gu_addr;							\
	long __gu_err;							\
									\
	might_sleep();							\
	__gu_addr = (long) (ptr);					\
	__gu_err = access_ok(VERIFY_READ, (void *) __gu_addr, size)	\
				? 0 : -EFAULT;				\
									\
	if (likely(!__gu_err)) {					\
		switch (size) {						\
		case 1: __get_user_asm("lb", __gu_err); break;		\
		case 2: __get_user_asm("lh", __gu_err); break;		\
		case 4: __get_user_asm("lw", __gu_err); break;		\
		case 8: __GET_USER_DW(__gu_err); break;			\
		default: __get_user_unknown(); break;			\
		}							\
	}								\
	x = (__typeof__(*(ptr))) __gu_val;				\
	__gu_err;							\
})
#define __get_user_asm(insn,__gu_err)					\
({									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_val)				\
	: "0" (__gu_err), "o" (__m(__gu_addr)), "i" (-EFAULT));		\
})
/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_asm_ll32(__gu_err)					\
({									\
	__asm__ __volatile__(						\
	"1:	lw	%1, %3					\n"	\
	"2:	lw	%D1, %4					\n"	\
	"	move	%0, $0					\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %5					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_val)				\
	: "0" (__gu_err), "o" (__m(__gu_addr)),				\
	  "o" (__m(__gu_addr + 4)), "i" (-EFAULT));			\
})
extern void __get_user_unknown(void);

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef __mips64
#define __PUT_USER_DW(__pu_val) __put_user_asm("sd", __pu_val)
#else
#define __PUT_USER_DW(__pu_val) __put_user_asm_ll32(__pu_val)
#endif
#define __put_user_nocheck(x,ptr,size)					\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	long __pu_addr;							\
	long __pu_err = 0;						\
									\
	might_sleep();							\
	__pu_val = (x);							\
	__pu_addr = (long) (ptr);					\
	switch (size) {							\
	case 1: __put_user_asm("sb", __pu_val); break;			\
	case 2: __put_user_asm("sh", __pu_val); break;			\
	case 4: __put_user_asm("sw", __pu_val); break;			\
	case 8: __PUT_USER_DW(__pu_val); break;				\
	default: __put_user_unknown(); break;				\
	}								\
	__pu_err;							\
})
#define __put_user_check(x,ptr,size)					\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	long __pu_addr;							\
	long __pu_err;							\
									\
	might_sleep();							\
	__pu_val = (x);							\
	__pu_addr = (long) (ptr);					\
	__pu_err = access_ok(VERIFY_WRITE, (void *) __pu_addr, size)	\
				? 0 : -EFAULT;				\
									\
	if (likely(!__pu_err)) {					\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_val); break;		\
		case 2: __put_user_asm("sh", __pu_val); break;		\
		case 4: __put_user_asm("sw", __pu_val); break;		\
		case 8: __PUT_USER_DW(__pu_val); break;			\
		default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})
#define __put_user_asm(insn, __pu_val)					\
({									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_asm\n"	\
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (__pu_err), "Jr" (__pu_val), "o" (__m(__pu_addr)),	\
	  "i" (-EFAULT));						\
})
#define __put_user_asm_ll32(__pu_val)					\
({									\
	__asm__ __volatile__(						\
	"1:	sw	%2, %3		# __put_user_asm_ll32	\n"	\
	"2:	sw	%D2, %4					\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %5					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (__pu_err), "r" (__pu_val), "o" (__m(__pu_addr)),		\
	  "o" (__m(__pu_addr + 4)), "i" (-EFAULT));			\
})
extern void __put_user_unknown(void);

/*
 * We're generating jumps to subroutines which will be outside the range of
 * jump instructions.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif
extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to,from,n)				\
({									\
	register void *__cu_to_r __asm__ ("$4");			\
	register const void *__cu_from_r __asm__ ("$5");		\
	register long __cu_len_r __asm__ ("$6");			\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  "memory");							\
	__cu_len_r;							\
})
/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to,from,n)					\
({									\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to,from,n)						\
({									\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len))			\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
		                                 __cu_len);		\
	__cu_len;							\
})
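/*
 * Example (illustrative sketch, hypothetical character device): a read()
 * method handing a driver buffer back to user space.  example_kernel_buf
 * is an assumed driver-private array:
 *
 *	static ssize_t example_read(struct file *file, char __user *buf,
 *	                            size_t count, loff_t *ppos)
 *	{
 *		size_t len = min(count, sizeof(example_kernel_buf));
 *
 *		if (copy_to_user(buf, example_kernel_buf, len))
 *			return -EFAULT;
 *		return len;
 *	}
 *
 * copy_to_user() performs the access_ok() check itself, unlike
 * __copy_to_user(), and returns the number of bytes it could not copy.
 */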
#define __invoke_copy_from_user(to,from,n)				\
({									\
	register void *__cu_to_r __asm__ ("$4");			\
	register const void *__cu_from_r __asm__ ("$5");		\
	register long __cu_len_r __asm__ ("$6");			\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  "memory");							\
	__cu_len_r;							\
})
/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to,from,n)					\
({									\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
	                                   __cu_len);			\
	__cu_len;							\
})
/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to,from,n)					\
({									\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_READ, __cu_from, __cu_len))		\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
		                                   __cu_len);		\
	__cu_len;							\
})
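/*
 * Example (illustrative sketch, hypothetical character device): a write()
 * method pulling user data into a kernel buffer.  example_consume() is an
 * assumed driver-private helper:
 *
 *	static ssize_t example_write(struct file *file, const char __user *buf,
 *	                             size_t count, loff_t *ppos)
 *	{
 *		char kbuf[64];
 *		size_t len = min(count, sizeof(kbuf));
 *
 *		if (copy_from_user(kbuf, buf, len))
 *			return -EFAULT;
 *		example_consume(kbuf, len);
 *		return len;
 *	}
 *
 * Any nonzero return from copy_from_user() means a partial or failed copy.
 */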
#define __copy_in_user(to, from, n)	__copy_from_user(to, from, n)

#define copy_in_user(to,from,n)						\
({									\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\
	           access_ok(VERIFY_WRITE, __cu_to, __cu_len)))		\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
		                                   __cu_len);		\
	__cu_len;							\
})
/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}
#define clear_user(addr,n)						\
({									\
	void * __cl_addr = (addr);					\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
		((unsigned long)(__cl_addr)), __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
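/*
 * Example (illustrative sketch): clear_user() is handy for zero-padding the
 * unwritten tail of a user buffer after a partial copy:
 *
 *	if (clear_user(buf + copied, count - copied))
 *		return -EFAULT;
 *
 * where buf, copied and count are assumed to come from a surrounding
 * read()-style handler.  The return value is the number of bytes that
 * could not be cleared.
 */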
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char *__from, long __len)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char *__from, long __len)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
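/*
 * Example (illustrative sketch): pulling a short, NUL-terminated name from
 * user space into a fixed kernel buffer.  uptr is an assumed
 * const char __user * argument of the caller:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uptr, sizeof(name));
 *
 *	if (len < 0)
 *		return len;		(-EFAULT on a faulting access)
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	(no room left for the trailing NUL)
 *
 * After this, name[] holds a NUL-terminated copy of len bytes.
 */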
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char *s)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}
/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char *s)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char *s, long n)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
/*
 * strnlen_user: - Get the size of a string in user space, with a length limit.
 * @str: The string to measure.
 * @n:   The maximum number of bytes to examine.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 */
static inline long strnlen_user(const char *s, long n)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);
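/*
 * Illustrative sketch (the real implementation lives in the MIPS mm code,
 * not in this header): when a user access in one of the macros above
 * faults, the trap handler can call fixup_exception(), which conceptually
 * searches the __ex_table entries emitted above and, if the faulting
 * instruction address (insn) is found, resumes execution at the associated
 * fixup (nextinsn):
 *
 *	int fixup_exception(struct pt_regs *regs)
 *	{
 *		const struct exception_table_entry *fixup;
 *
 *		fixup = search_exception_tables(exception_epc(regs));
 *		if (fixup) {
 *			regs->cp0_epc = fixup->nextinsn;
 *			return 1;
 *		}
 *		return 0;
 *	}
 */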
#endif /* _ASM_UACCESS_H */