#ifndef _ASM_M32R_UACCESS_H
#define _ASM_M32R_UACCESS_H

/*
 * linux/include/asm-m32r/uaccess.h
 *
 * M32R version.
 * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
 */

/*
 * User space memory access functions
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifdef CONFIG_MMU

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#else /* not CONFIG_MMU */

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(0xFFFFFFFF)
#define get_ds()	(KERNEL_DS)

static inline mm_segment_t get_fs(void)
{
	return USER_DS;
}

static inline void set_fs(mm_segment_t s)
{
}

#endif /* not CONFIG_MMU */

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
 *
 * This needs 33-bit arithmetic. We have a carry...
 */
#define __range_ok(addr,size) ({					\
	unsigned long flag, sum;					\
	__chk_user_ptr(addr);						\
	asm (								\
		"	cmpu	%1, %1	; clear cbit\n"			\
		"	addx	%1, %3	; set cbit if overflow\n"	\
		"	subx	%0, %0\n"				\
		"	cmpu	%4, %1\n"				\
		"	subx	%0, %5\n"				\
		: "=&r" (flag), "=r" (sum)				\
		: "1" (addr), "r" ((int)(size)),			\
		  "r" (current_thread_info()->addr_limit.seg), "r" (0)	\
		: "cbit" );						\
	flag; })

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#ifdef CONFIG_MMU
#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
#else
static inline int access_ok(int type, const void *addr, unsigned long size)
{
	extern unsigned long memory_start, memory_end;
	unsigned long val = (unsigned long)addr;

	return ((val >= memory_start) && ((val + size) < memory_end));
}
#endif /* CONFIG_MMU */
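
/*
 * Illustrative usage sketch (not part of the original header): validate a
 * user buffer once, then use the unchecked __-prefixed accessors on it.
 * The identifiers `ubuf`, `kbuf`, `len` and `ret` are hypothetical.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	ret = __copy_to_user(ubuf, kbuf, len);
 */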

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

/* Careful: we have to cast the result to the type of the pointer for sign
   reasons */
/**
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr)							\
	__get_user_check((x),(ptr),sizeof(*(ptr)))
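
/*
 * Usage sketch (illustrative, not part of the original header): fetch an
 * int passed from user space via a hypothetical `int __user *uptr`.
 * get_user() returns 0 on success and -EFAULT on a faulting access, so
 * the result is usually propagated directly:
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */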

/**
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr)							\
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
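
/*
 * Usage sketch (illustrative, not part of the original header): write a
 * kernel value back through a hypothetical `int __user *uptr`:
 *
 *	if (put_user(result, uptr))
 *		return -EFAULT;
 */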

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr)						\
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))

#define __get_user_nocheck(x,ptr,size)					\
({									\
	long __gu_err = 0;						\
	unsigned long __gu_val;						\
	might_sleep();							\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);		\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})

#define __get_user_check(x,ptr,size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_sleep();							\
	if (access_ok(VERIFY_READ,__gu_addr,size))			\
		__get_user_size(__gu_val,__gu_addr,(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1: __get_user_asm(x,ptr,retval,"ub"); break;		\
	case 2: __get_user_asm(x,ptr,retval,"uh"); break;		\
	case 4: __get_user_asm(x,ptr,retval,""); break;			\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype)				\
	__asm__ __volatile__(						\
		"	.fillinsn\n"					\
		"1:	ld"itype" %1,@%2\n"				\
		"	.fillinsn\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"	.balign 4\n"					\
		"3:	ldi %0,%3\n"					\
		"	seth r14,#high(2b)\n"				\
		"	or3 r14,r14,#low(2b)\n"				\
		"	jmp r14\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.balign 4\n"					\
		"	.long 1b,3b\n"					\
		".previous"						\
		: "=&r" (err), "=&r" (x)				\
		: "r" (addr), "i" (-EFAULT), "0" (err)			\
		: "r14", "memory")

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __put_user_nocheck(x,ptr,size)					\
({									\
	long __pu_err;							\
	might_sleep();							\
	__put_user_size((x),(ptr),(size),__pu_err);			\
	__pu_err;							\
})

#define __put_user_check(x,ptr,size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	might_sleep();							\
	if (access_ok(VERIFY_WRITE,__pu_addr,size))			\
		__put_user_size((x),__pu_addr,(size),__pu_err);		\
	__pu_err;							\
})

#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(x, addr, err)					\
	__asm__ __volatile__(						\
		"	.fillinsn\n"					\
		"1:	st %L1,@%2\n"					\
		"	.fillinsn\n"					\
		"2:	st %H1,@(4,%2)\n"				\
		"	.fillinsn\n"					\
		"3:\n"							\
		".section .fixup,\"ax\"\n"				\
		"	.balign 4\n"					\
		"4:	ldi %0,%3\n"					\
		"	seth r14,#high(3b)\n"				\
		"	or3 r14,r14,#low(3b)\n"				\
		"	jmp r14\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.balign 4\n"					\
		"	.long 1b,4b\n"					\
		"	.long 2b,4b\n"					\
		".previous"						\
		: "=&r" (err)						\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)		\
		: "r14", "memory")

#elif defined(__BIG_ENDIAN__)
#define __put_user_u64(x, addr, err)					\
	__asm__ __volatile__(						\
		"	.fillinsn\n"					\
		"1:	st %H1,@%2\n"					\
		"	.fillinsn\n"					\
		"2:	st %L1,@(4,%2)\n"				\
		"	.fillinsn\n"					\
		"3:\n"							\
		".section .fixup,\"ax\"\n"				\
		"	.balign 4\n"					\
		"4:	ldi %0,%3\n"					\
		"	seth r14,#high(3b)\n"				\
		"	or3 r14,r14,#low(3b)\n"				\
		"	jmp r14\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.balign 4\n"					\
		"	.long 1b,4b\n"					\
		"	.long 2b,4b\n"					\
		".previous"						\
		: "=&r" (err)						\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)		\
		: "r14", "memory")
#else
#error no endian defined
#endif

extern void __put_user_bad(void);

#define __put_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1: __put_user_asm(x,ptr,retval,"b"); break;		\
	case 2: __put_user_asm(x,ptr,retval,"h"); break;		\
	case 4: __put_user_asm(x,ptr,retval,""); break;			\
	case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
	default: __put_user_bad();					\
	}								\
} while (0)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype)				\
	__asm__ __volatile__(						\
		"	.fillinsn\n"					\
		"1:	st"itype" %1,@%2\n"				\
		"	.fillinsn\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"	.balign 4\n"					\
		"3:	ldi %0,%3\n"					\
		"	seth r14,#high(2b)\n"				\
		"	or3 r14,r14,#low(2b)\n"				\
		"	jmp r14\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.balign 4\n"					\
		"	.long 1b,3b\n"					\
		".previous"						\
		: "=&r" (err)						\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)		\
		: "r14", "memory")

/*
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the m32r will not write
 * anything, so this is accurate.
 */

/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy. */
/* Return the number of bytes NOT copied. */
#define __copy_user(to,from,size)					\
do {									\
	unsigned long __dst, __src, __c;				\
	__asm__ __volatile__ (						\
		"	mv	r14, %0\n"				\
		"	or	r14, %1\n"				\
		"	beq	%0, %1, 9f\n"				\
		"	beqz	%2, 9f\n"				\
		"	and3	r14, r14, #3\n"				\
		"	bnez	r14, 2f\n"				\
		"	and3	%2, %2, #3\n"				\
		"	beqz	%3, 2f\n"				\
		"	addi	%0, #-4		; word_copy \n"		\
		"	.fillinsn\n"					\
		"0:	ld	r14, @%1+\n"				\
		"	addi	%3, #-1\n"				\
		"	.fillinsn\n"					\
		"1:	st	r14, @+%0\n"				\
		"	bnez	%3, 0b\n"				\
		"	beqz	%2, 9f\n"				\
		"	addi	%0, #4\n"				\
		"	.fillinsn\n"					\
		"2:	ldb	r14, @%1	; byte_copy \n"		\
		"	.fillinsn\n"					\
		"3:	stb	r14, @%0\n"				\
		"	addi	%1, #1\n"				\
		"	addi	%2, #-1\n"				\
		"	addi	%0, #1\n"				\
		"	bnez	%2, 2b\n"				\
		"	.fillinsn\n"					\
		"9:\n"							\
		".section .fixup,\"ax\"\n"				\
		"	.balign 4\n"					\
		"5:	addi	%3, #1\n"				\
		"	addi	%1, #-4\n"				\
		"	.fillinsn\n"					\
		"6:	slli	%3, #2\n"				\
		"	add	%2, %3\n"				\
		"	addi	%0, #4\n"				\
		"	.fillinsn\n"					\
		"7:	seth	r14, #high(9b)\n"			\
		"	or3	r14, r14, #low(9b)\n"			\
		"	jmp	r14\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.balign 4\n"					\
		"	.long 0b,6b\n"					\
		"	.long 1b,5b\n"					\
		"	.long 2b,9b\n"					\
		"	.long 3b,9b\n"					\
		".previous\n"						\
		: "=&r" (__dst), "=&r" (__src), "=&r" (size),		\
		  "=&r" (__c)						\
		: "0" (to), "1" (from), "2" (size), "3" (size / 4)	\
		: "r14", "memory");					\
} while (0)

#define __copy_user_zeroing(to,from,size)				\
do {									\
	unsigned long __dst, __src, __c;				\
	__asm__ __volatile__ (						\
		"	mv	r14, %0\n"				\
		"	or	r14, %1\n"				\
		"	beq	%0, %1, 9f\n"				\
		"	beqz	%2, 9f\n"				\
		"	and3	r14, r14, #3\n"				\
		"	bnez	r14, 2f\n"				\
		"	and3	%2, %2, #3\n"				\
		"	beqz	%3, 2f\n"				\
		"	addi	%0, #-4		; word_copy \n"		\
		"	.fillinsn\n"					\
		"0:	ld	r14, @%1+\n"				\
		"	addi	%3, #-1\n"				\
		"	.fillinsn\n"					\
		"1:	st	r14, @+%0\n"				\
		"	bnez	%3, 0b\n"				\
		"	beqz	%2, 9f\n"				\
		"	addi	%0, #4\n"				\
		"	.fillinsn\n"					\
		"2:	ldb	r14, @%1	; byte_copy \n"		\
		"	.fillinsn\n"					\
		"3:	stb	r14, @%0\n"				\
		"	addi	%1, #1\n"				\
		"	addi	%2, #-1\n"				\
		"	addi	%0, #1\n"				\
		"	bnez	%2, 2b\n"				\
		"	.fillinsn\n"					\
		"9:\n"							\
		".section .fixup,\"ax\"\n"				\
		"	.balign 4\n"					\
		"5:	addi	%3, #1\n"				\
		"	addi	%1, #-4\n"				\
		"	.fillinsn\n"					\
		"6:	slli	%3, #2\n"				\
		"	add	%2, %3\n"				\
		"	addi	%0, #4\n"				\
		"	.fillinsn\n"					\
		"7:	ldi	r14, #0		; store zero \n"	\
		"	.fillinsn\n"					\
		"8:	addi	%2, #-1\n"				\
		"	stb	r14, @%0	; ACE? \n"		\
		"	addi	%0, #1\n"				\
		"	bnez	%2, 8b\n"				\
		"	seth	r14, #high(9b)\n"			\
		"	or3	r14, r14, #low(9b)\n"			\
		"	jmp	r14\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.balign 4\n"					\
		"	.long 0b,6b\n"					\
		"	.long 1b,5b\n"					\
		"	.long 2b,7b\n"					\
		"	.long 3b,7b\n"					\
		".previous\n"						\
		: "=&r" (__dst), "=&r" (__src), "=&r" (size),		\
		  "=&r" (__c)						\
		: "0" (to), "1" (from), "2" (size), "3" (size / 4)	\
		: "r14", "memory");					\
} while (0)

/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline unsigned long __generic_copy_from_user_nocheck(void *to,
	const void __user *from, unsigned long n)
{
	__copy_user_zeroing(to,from,n);
	return n;
}

static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
	const void *from, unsigned long n)
{
	__copy_user(to,from,n);
	return n;
}

unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to,from,n)					\
	__generic_copy_to_user_nocheck((to),(from),(n))

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to,from,n)						\
({									\
	might_sleep();							\
	__generic_copy_to_user((to),(from),(n));			\
})
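
/*
 * Usage sketch (illustrative, not part of the original header): copy a
 * kernel structure out to a hypothetical user pointer `uarg`; any nonzero
 * (short) return is normally folded into -EFAULT by the caller:
 *
 *	if (copy_to_user(uarg, &kinfo, sizeof(kinfo)))
 *		return -EFAULT;
 */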

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to,from,n)					\
	__generic_copy_from_user_nocheck((to),(from),(n))

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to,from,n)					\
({									\
	might_sleep();							\
	__generic_copy_from_user((to),(from),(n));			\
})
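
/*
 * Usage sketch (illustrative, not part of the original header): pull a
 * structure in from a hypothetical user pointer `uarg` before using it;
 * on a partial copy the destination tail has already been zero-padded:
 *
 *	struct foo karg;
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */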

long __must_check strncpy_from_user(char *dst, const char __user *src,
				    long count);
long __must_check __strncpy_from_user(char *dst,
				      const char __user *src, long count);

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to zero.
 *
 * Zero a block of memory in user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long __clear_user(void __user *mem, unsigned long len);

/**
 * clear_user: - Zero a block of memory in user space.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long clear_user(void __user *mem, unsigned long len);
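
/*
 * Usage sketch (illustrative, not part of the original header): zero the
 * uncopied tail of a hypothetical user buffer `ubuf` of length `len`,
 * where `copied` bytes have already been written; any byte that could not
 * be cleared is treated as a fault:
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */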

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
long strnlen_user(const char __user *str, long n);
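
/*
 * Usage sketch (illustrative, not part of the original header): bound a
 * user-supplied path before copying it; `ustr` and the PATH_MAX limit are
 * hypothetical here. A return of 0 means the string could not be read,
 * and a value above the limit means it was not NUL-terminated in range:
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *	if (len == 0 || len > PATH_MAX)
 *		return -EFAULT;
 */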

#endif /* _ASM_M32R_UACCESS_H */