/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic accessing functions to the user memory
 * space for the kernel.  This header file provides functions such as
 * get_user(), put_user(), copy_{to,from}_user(), and clear_user().
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */
#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/errno.h>
#define VERIFY_READ	0
#define VERIFY_WRITE	1
#ifdef __ASSEMBLY__

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
/*
 * These assembly macros mirror the C macros that follow below.  They
 * should always have identical functionality.  See
 * arch/xtensa/kernel/sys.S for usage.
 */

#define KERNEL_DS	0
#define USER_DS		1

#define get_ds		(KERNEL_DS)
/*
 * get_fs reads current->thread.current_ds into a register.
 * On Entry:
 *	<ad>	anything
 *	<sp>	stack
 * On Exit:
 *	<ad>	contains current->thread.current_ds
 */
	.macro	get_fs	ad, sp
	GET_CURRENT(\ad,\sp)
	l32i	\ad, \ad, THREAD_CURRENT_DS
	.endm
/*
 * set_fs sets current->thread.current_ds to some value.
 * On Entry:
 *	<at>	anything (temp register)
 *	<av>	value to write
 *	<sp>	stack
 * On Exit:
 *	<at>	destroyed (actually, current)
 *	<av>	preserved, value to write
 */
	.macro	set_fs	at, av, sp
	GET_CURRENT(\at,\sp)
	s32i	\av, \at, THREAD_CURRENT_DS
	.endm
/*
 * kernel_ok determines whether we should bypass addr/size checking.
 * See the equivalent C-macro version below for clarity.
 * On success, kernel_ok branches to a label indicated by parameter
 * <success>.  This implies that the macro falls through to the next
 * instruction on an error.
 *
 * Note that while this macro can be used independently, we designed
 * it for optimal use in the access_ok macro below (i.e., we fall
 * through on error).
 *
 * On Entry:
 *	<at>	anything (temp register)
 *	<success>	label to branch to on success; implies
 *			fall-through macro on error
 *	<sp>	stack pointer
 * On Exit:
 *	<at>	destroyed (actually, current->thread.current_ds)
 */
#if ((KERNEL_DS != 0) || (USER_DS == 0))
# error Assembly macro kernel_ok fails
#endif
	.macro	kernel_ok  at, sp, success
	get_fs	\at, \sp
	beqz	\at, \success
	.endm
/*
 * user_ok determines whether the access to user-space memory is allowed.
 * See the equivalent C-macro version below for clarity.
 *
 * On error, user_ok branches to a label indicated by parameter
 * <error>.  This implies that the macro falls through to the next
 * instruction on success.
 *
 * Note that while this macro can be used independently, we designed
 * it for optimal use in the access_ok macro below (i.e., we fall
 * through on success).
 *
 * On Entry:
 *	<aa>	register containing memory address
 *	<as>	register containing memory size
 *	<at>	temp register
 *	<error>	label to branch to on error; implies fall-through
 *		macro on success
 * On Exit:
 *	<aa>	preserved
 *	<as>	preserved
 *	<at>	destroyed (actually, (TASK_SIZE + 1 - size))
 */
	.macro	user_ok	aa, as, at, error
	movi	\at, (TASK_SIZE+1)
	bgeu	\as, \at, \error
	sub	\at, \at, \as
	bgeu	\aa, \at, \error
	.endm
/*
 * access_ok determines whether a memory access is allowed.  See the
 * equivalent C-macro version below for clarity.
 *
 * On error, access_ok branches to a label indicated by parameter
 * <error>.  This implies that the macro falls through to the next
 * instruction on success.
 *
 * Note that we assume success is the common case, and we optimize the
 * branch fall-through case on success.
 *
 * On Entry:
 *	<aa>	register containing memory address
 *	<as>	register containing memory size
 *	<at>	temp register
 *	<sp>	stack pointer
 *	<error>	label to branch to on error; implies fall-through
 *		macro on success
 * On Exit:
 *	<aa>	preserved
 *	<as>	preserved
 *	<at>	destroyed
 */
	.macro	access_ok  aa, as, at, sp, error
	kernel_ok  \at, \sp, .Laccess_ok_\@
	user_ok    \aa, \as, \at, \error
.Laccess_ok_\@:
	.endm
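/*
 * Illustrative sketch only (the register choices below are assumptions,
 * not taken from sys.S): validating a user buffer whose address is in
 * a3 and size in a4.  access_ok falls through on success and branches
 * to the error label otherwise.
 *
 *	access_ok a3, a4, a5, sp, .Leinval
 *	...				# safe to access the user buffer
 * .Leinval:
 *	movi	a2, -EFAULT		# error path
 */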
#else /* __ASSEMBLY__ not defined */
#include <linux/sched.h>
#include <asm/types.h>
/*
 * The fs value determines whether argument validity checking should
 * be performed or not.  If get_fs() == USER_DS, checking is
 * performed; with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are
 * grossly misnamed.
 */
#define KERNEL_DS	((mm_segment_t) { 0 })
#define USER_DS		((mm_segment_t) { 1 })

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	(current->thread.current_ds = (val))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr,size) (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE - (size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
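/*
 * A minimal usage sketch ('ubuf' and 'len' are hypothetical): access_ok()
 * only range-checks the address against TASK_SIZE (and always succeeds
 * under KERNEL_DS); it never touches the memory itself.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */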
/*
 * These are the main single-value transfer routines.  They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly.  We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact.  Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Be careful not to:
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
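/*
 * Usage sketch (the user pointer 'uptr' is hypothetical): both macros
 * evaluate to 0 on success and -EFAULT on failure, and the access width
 * is chosen by sizeof(*(ptr)).
 *
 *	int val;
 *	if (get_user(val, (int *)uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int *)uptr))
 *		return -EFAULT;
 */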
/*
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that check must have been done
 * previously with a separate "access_ok()" call (this is used when we
 * do multiple accesses to the same area of user memory).
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
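/*
 * Sketch of the intended pattern ('uvec', 'a', 'b' are hypothetical):
 * one access_ok() call covering several subsequent unchecked accesses.
 *
 *	int a, b;
 *	if (!access_ok(VERIFY_READ, uvec, 2 * sizeof(int)))
 *		return -EFAULT;
 *	__get_user(a, &uvec[0]);
 *	__get_user(b, &uvec[1]);
 */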
extern long __put_user_bad(void);
#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})
#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	if (access_ok(VERIFY_WRITE,__pu_addr,size))		\
		__put_user_size((x),__pu_addr,(size),__pu_err);	\
	__pu_err;						\
})
#define __put_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x,ptr,retval,1,"s8i"); break;		\
	case 2: __put_user_asm(x,ptr,retval,2,"s16i"); break;		\
	case 4: __put_user_asm(x,ptr,retval,4,"s32i"); break;		\
	case 8: {							\
			__typeof__(*ptr) __v64 = x;			\
			retval = __copy_to_user(ptr,&__v64,8);		\
			break;						\
		}							\
	default: __put_user_bad();					\
	}								\
} while (0)
/*
 * Consider a case where a single user load/store would cause both an
 * unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable ptr to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for an MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses.  We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel.  Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
 * sync.
 */
#define __check_align_1	""
#define __check_align_2				\
	"   _bbci.l %2,  0, 1f		\n"	\
	"   movi    %0, %3		\n"	\
	"   j       2f			\n"
#define __check_align_4				\
	"   _bbsi.l %2,  0, 0f		\n"	\
	"   _bbci.l %2,  1, 1f		\n"	\
	"0: movi    %0, %3		\n"	\
	"   j       2f			\n"
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __put_user_asm(x, addr, err, align, insn)	\
__asm__ __volatile__(					\
	__check_align_##align				\
	"1: "insn"  %1, %2, 0		\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"4:				\n"		\
	"   .long  2b			\n"		\
	"5:				\n"		\
	"   l32r   %2, 4b		\n"		\
	"   movi   %0, %3		\n"		\
	"   jx     %2			\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long	1b, 5b		\n"		\
	"   .previous"					\
	:"=r" (err)					\
	:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
#define __get_user_check(x,ptr,size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (access_ok(VERIFY_READ,__gu_addr,size))			\
		__get_user_size(__gu_val,__gu_addr,(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})
extern long __get_user_bad(void);
#define __get_user_size(x,ptr,size,retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __get_user_asm(x,ptr,retval,1,"l8ui");  break;	\
	case 2: __get_user_asm(x,ptr,retval,2,"l16ui"); break;	\
	case 4: __get_user_asm(x,ptr,retval,4,"l32i");  break;	\
	case 8: retval = __copy_from_user(&x,ptr,8);    break;	\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)
/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x, addr, err, align, insn)	\
__asm__ __volatile__(					\
	__check_align_##align				\
	"1: "insn"  %1, %2, 0		\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"4:				\n"		\
	"   .long  2b			\n"		\
	"5:				\n"		\
	"   l32r   %2, 4b		\n"		\
	"   movi   %1, 0		\n"		\
	"   movi   %0, %3		\n"		\
	"   jx     %2			\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long	1b, 5b		\n"		\
	"   .previous"					\
	:"=r" (err), "=r" (x)				\
	:"r" (addr), "i" (-EFAULT), "0" (err))
/*
 * Copy to/from user space
 *
 * We use a generic, arbitrary-sized copy subroutine.  The Xtensa
 * architecture would cause heavy code bloat if we tried to inline
 * these functions and provide __constant_copy_* equivalents like the
 * i386 versions.  __xtensa_copy_user is quite efficient.  See the
 * .fixup section of __xtensa_copy_user for a discussion on the
 * X_zeroing equivalents for Xtensa.
 */
extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size)
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to,from,n);
}
static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to,from,n);
}
static inline unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_user(to,from,n);
	return n;
}
static inline unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		return __copy_user(to,from,n);
	return n;
}
#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
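/*
 * Usage sketch ('karg', 'uarg', and struct foo are hypothetical): both
 * directions return the number of bytes that could NOT be copied, so a
 * nonzero return means a fault.
 *
 *	struct foo karg;
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */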
/*
 * We need to return the number of bytes not cleared.  Our memset()
 * returns zero if a problem occurs while accessing user-space memory.
 * In that event, return no memory cleared.  Otherwise, zero for
 * success.
 */

static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
	if (!memset(addr, 0, size))
		return size;
	return 0;
}
static inline unsigned long
clear_user(void *addr, unsigned long size)
{
	if (access_ok(VERIFY_WRITE, addr, size))
		return __xtensa_clear_user(addr, size);
	return size ? -EFAULT : 0;
}
#define __clear_user __xtensa_clear_user
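/*
 * Usage sketch ('ubuf', 'len', 'copied' are hypothetical): zero-filling
 * the uncopied tail of a user buffer; a nonzero return indicates a fault.
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */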
extern long __strncpy_user(char *, const char *, long);
#define __strncpy_from_user __strncpy_user
static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}
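/*
 * Usage sketch (the user pointer 'uname' is hypothetical): the return
 * value is the length of the copied string, or -EFAULT on a bad source
 * address.
 *
 *	char buf[32];
 *	long len = strncpy_from_user(buf, uname, sizeof(buf) - 1);
 *	if (len < 0)
 *		return -EFAULT;
 *	buf[len] = '\0';
 */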
#define strlen_user(str) strnlen_user((str), TASK_SIZE - 1)
/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *, long);
static inline long strnlen_user(const char *str, long len)
{
	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len);
}
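/*
 * Usage sketch ('ustr' and 'limit' are hypothetical): the returned
 * count includes the terminating 0, and 0 is returned for an
 * out-of-range pointer.
 *
 *	long n = strnlen_user(ustr, limit);
 *	if (n == 0)
 *		return -EFAULT;
 */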
struct exception_table_entry
{
	unsigned long insn, fixup;
};
/* Returns 0 if exception not found and fixup.unit otherwise.  */

extern unsigned long search_exception_table(unsigned long addr);
extern void sort_exception_table(void);
/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc)	\
({							\
	fixup_unit;					\
})
#endif	/* __ASSEMBLY__ */
#endif	/* _XTENSA_UACCESS_H */