#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ	0
#define VERIFY_WRITE	1
#define get_fs()  (current->thread.fs)
#define get_ds()  (KERNEL_DS)
#define set_fs(x) (current->thread.fs = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
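/*
 * Illustrative sketch, not part of this header: the classic pattern for
 * letting kernel code pass a kernel buffer through a routine that
 * validates against get_fs().  The helper name is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	err = do_transfer(kernel_buf, len);
 *	set_fs(old_fs);
 */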
/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr,size,segment) \
	(((segment).seg & (addr | size | (addr+size))) == 0)

#define access_ok(type,addr,size) \
	__access_ok(((unsigned long)(addr)),(size),get_fs())
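/*
 * Worked example (editorial): USER_DS.seg is -0x40000000000UL, which is
 * 0xfffffc0000000000, so the AND is non-zero, and the check fails, as
 * soon as "addr", "size", or "addr+size" has any of bits 42..63 set.
 * KERNEL_DS.seg is 0, so with a kernel segment the check always passes.
 */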
extern inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
#define get_user(x,ptr) \
  __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
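/*
 * Illustrative use in a hypothetical syscall body: both macros return 0
 * on success and -EFAULT on a fault; get_user() also zeroes the
 * destination when the access fails.
 *
 *	int value;
 *	if (get_user(value, uptr))
 *		return -EFAULT;
 *	if (put_user(value + 1, uptr))
 *		return -EFAULT;
 */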
/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
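/*
 * Illustrative use (hypothetical, error handling elided): one
 * access_ok() check amortized over several unchecked accesses to the
 * same user array.
 *
 *	int a, b;
 *	if (!access_ok(VERIFY_READ, uarr, 2 * sizeof(int)))
 *		return -EFAULT;
 *	__get_user(a, uarr);
 *	__get_user(b, uarr + 1);
 */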
/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  These macros can be optimized for
 * the common case of simply returning from the function in which
 * xxx_ret is used.
 */
#define put_user_ret(x,ptr,ret) ({ \
	if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ \
	if (get_user(x,ptr)) return ret; })

#define __put_user_ret(x,ptr,ret) ({ \
	if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ \
	if (__get_user(x,ptr)) return ret; })
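/*
 * Illustrative use (hypothetical ioctl handler): the *enclosing*
 * function returns -EFAULT if the store faults, with no explicit test.
 * DRIVER_VERSION is a placeholder constant.
 *
 *	static int get_version(int *uptr)
 *	{
 *		put_user_ret(DRIVER_VERSION, uptr, -EFAULT);
 *		return 0;
 *	}
 */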
/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_exception below for
 * more information.
 */
extern void __get_user_unknown(void);

#define __get_user_nocheck(x,ptr,size)			\
({							\
	long __gu_err = 0, __gu_val;			\
	switch (size) {					\
	  case 1: __get_user_8(ptr); break;		\
	  case 2: __get_user_16(ptr); break;		\
	  case 4: __get_user_32(ptr); break;		\
	  case 8: __get_user_64(ptr); break;		\
	  default: __get_user_unknown(); break;		\
	}						\
	(x) = (__typeof__(*(ptr))) __gu_val;		\
	__gu_err;					\
})
#define __get_user_check(x,ptr,size,segment)			\
({								\
	long __gu_err = -EFAULT, __gu_val = 0;			\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);		\
	if (__access_ok((long)__gu_addr,size,segment)) {	\
		__gu_err = 0;					\
		switch (size) {					\
		  case 1: __get_user_8(__gu_addr); break;	\
		  case 2: __get_user_16(__gu_addr); break;	\
		  case 4: __get_user_32(__gu_addr); break;	\
		  case 8: __get_user_64(__gu_addr); break;	\
		  default: __get_user_unknown(); break;		\
		}						\
	}							\
	(x) = (__typeof__(*(ptr))) __gu_val;			\
	__gu_err;						\
})
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
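/*
 * Editorial note: casting the address through __large_struct makes the
 * "m" operands below cover a large region rather than a single word, so
 * gcc will not assume it knows exactly which memory the asm touches.
 */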
#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.gprel32 1b\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.gprel32 1b\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */
#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.gprel32 1b\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.gprel32 1b\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */
#define __get_user_16(addr)					\
{								\
	long __gu_tmp;						\
	__asm__("1: ldq_u %0,0(%3)\n"				\
	"2: ldq_u %1,1(%3)\n"					\
	"	extwl %0,%3,%0\n"				\
	"	extwh %1,%3,%1\n"				\
	"	or %0,%1,%0\n"					\
	"3:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda %0, 3b-1b(%2)\n"				\
	"	.gprel32 2b\n"					\
	"	lda %0, 3b-2b(%2)\n"				\
	".previous"						\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \
		: "r"(addr), "2"(__gu_err));			\
}
#define __get_user_8(addr)				\
	__asm__("1: ldq_u %0,0(%2)\n"			\
	"	extbl %0,%2,%0\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.gprel32 1b\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=&r"(__gu_val), "=r"(__gu_err)	\
		: "r"(addr), "1"(__gu_err))
#endif
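/*
 * Illustrative walk-through (editorial): for a 16-bit value straddling
 * a quadword boundary, the two ldq_u loads fetch the aligned quadwords
 * holding the low and high bytes, extwl/extwh shift each byte into
 * position, and "or" merges them, so no alignment trap is ever taken.
 */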
extern void __put_user_unknown(void);

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err = 0;				\
	switch (size) {					\
	  case 1: __put_user_8(x,ptr); break;		\
	  case 2: __put_user_16(x,ptr); break;		\
	  case 4: __put_user_32(x,ptr); break;		\
	  case 8: __put_user_64(x,ptr); break;		\
	  default: __put_user_unknown(); break;		\
	}						\
	__pu_err;					\
})
#define __put_user_check(x,ptr,size,segment)			\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	if (__access_ok((long)__pu_addr,size,segment)) {	\
		__pu_err = 0;					\
		switch (size) {					\
		  case 1: __put_user_8(x,__pu_addr); break;	\
		  case 2: __put_user_16(x,__pu_addr); break;	\
		  case 4: __put_user_32(x,__pu_addr); break;	\
		  case 8: __put_user_64(x,__pu_addr); break;	\
		  default: __put_user_unknown(); break;		\
		}						\
	}							\
	__pu_err;						\
})
/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues.
 */
#define __put_user_64(x,addr)					\
__asm__ __volatile__("1: stq %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x,addr)					\
__asm__ __volatile__("1: stl %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */
#define __put_user_16(x,addr)					\
__asm__ __volatile__("1: stw %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x,addr)					\
__asm__ __volatile__("1: stb %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */
#define __put_user_16(x,addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1: ldq_u %2,1(%5)\n"					\
	"2: ldq_u %1,0(%5)\n"					\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3: stq_u %2,1(%5)\n"					\
	"4: stq_u %1,0(%5)\n"					\
	"5:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda $31, 5b-1b(%0)\n"				\
	"	.gprel32 2b\n"					\
	"	lda $31, 5b-2b(%0)\n"				\
	"	.gprel32 3b\n"					\
	"	lda $31, 5b-3b(%0)\n"				\
	"	.gprel32 4b\n"					\
	"	lda $31, 5b-4b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}
#define __put_user_8(x,addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1: ldq_u %1,0(%4)\n"					\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2: stq_u %1,0(%4)\n"					\
	"3:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda $31, 3b-1b(%0)\n"				\
	"	.gprel32 2b\n"					\
	"	lda $31, 3b-2b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif
/*
 * Complex access routines
 */

extern void __copy_user(void);
extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
	/* This little bit of silliness is to get the GP loaded for
	   a function that ordinarily wouldn't.  Otherwise we could
	   have it done by the macro directly, which can be optimized
	   away by the linker.  */
	register void * pv __asm__("$27") = __copy_user;

	register void * __cu_to __asm__("$6") = to;
	register const void * __cu_from __asm__("$7") = from;
	register long __cu_len __asm__("$0") = len;

	__asm__ __volatile__(
		"jsr $28,(%3),__copy_user\n\tldgp $29,0($28)"
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to), "=r"(pv)
		: "0" (__cu_len), "1" (__cu_from), "2" (__cu_to), "3"(pv)
		: "$1","$2","$3","$4","$5","$28","memory");

	return __cu_len;
}
extern inline long
__copy_tofrom_user(void *to, const void *from, long len, const void *validate)
{
	if (__access_ok((long)validate, len, get_fs())) {
		register void * pv __asm__("$27") = __copy_user;
		register void * __cu_to __asm__("$6") = to;
		register const void * __cu_from __asm__("$7") = from;
		register long __cu_len __asm__("$0") = len;
		__asm__ __volatile__(
			"jsr $28,(%3),__copy_user\n\tldgp $29,0($28)"
			: "=r"(__cu_len), "=r"(__cu_from), "=r"(__cu_to),
			  "=r"(pv)
			: "0" (__cu_len), "1" (__cu_from), "2" (__cu_to),
			  "3"(pv)
			: "$1","$2","$3","$4","$5","$28","memory");
		len = __cu_len;
	}
	return len;
}
#define __copy_to_user(to,from,n)   __copy_tofrom_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) __copy_tofrom_user_nocheck((to),(from),(n))
extern inline long
copy_to_user(void *to, const void *from, long n)
{
	return __copy_tofrom_user(to, from, n, to);
}

extern inline long
copy_from_user(void *to, const void *from, long n)
{
	return __copy_tofrom_user(to, from, n, from);
}
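/*
 * Illustrative use in a hypothetical read()-style handler: both
 * routines return the number of bytes that could NOT be copied, so
 * zero means success.
 *
 *	if (copy_to_user(ubuf, kbuf, count))
 *		return -EFAULT;
 *	return count;
 */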
#define copy_to_user_ret(to,from,n,retval) ({ \
	if (copy_to_user(to,from,n)) \
		return retval; \
})

#define copy_from_user_ret(to,from,n,retval) ({ \
	if (copy_from_user(to,from,n)) \
		return retval; \
})
extern void __do_clear_user(void);

extern inline long
__clear_user(void *to, long len)
{
	/* This little bit of silliness is to get the GP loaded for
	   a function that ordinarily wouldn't.  Otherwise we could
	   have it done by the macro directly, which can be optimized
	   away by the linker.  */
	register void * pv __asm__("$27") = __do_clear_user;

	register void * __cl_to __asm__("$6") = to;
	register long __cl_len __asm__("$0") = len;
	__asm__ __volatile__(
		"jsr $28,(%2),__do_clear_user\n\tldgp $29,0($28)"
		: "=r"(__cl_len), "=r"(__cl_to), "=r"(pv)
		: "0"(__cl_len), "1"(__cl_to), "2"(pv)
		: "$1","$2","$3","$4","$5","$28","memory");
	return __cl_len;
}
extern inline long
clear_user(void *to, long len)
{
	if (__access_ok((long)to, len, get_fs())) {
		register void * pv __asm__("$27") = __do_clear_user;
		register void * __cl_to __asm__("$6") = to;
		register long __cl_len __asm__("$0") = len;
		__asm__ __volatile__(
			"jsr $28,(%2),__do_clear_user\n\tldgp $29,0($28)"
			: "=r"(__cl_len), "=r"(__cl_to), "=r"(pv)
			: "0"(__cl_len), "1"(__cl_to), "2"(pv)
			: "$1","$2","$3","$4","$5","$28","memory");
		len = __cl_len;
	}
	return len;
}
/* Returns: -EFAULT if exception before terminator, N if the entire
   buffer filled, else strlen.  */

extern long __strncpy_from_user(char *__to, const char *__from, long __to_len);
extern inline long
strncpy_from_user(char *to, const char *from, long n)
{
	long ret = -EFAULT;
	if (__access_ok((long)from, 0, get_fs()))
		ret = __strncpy_from_user(to, from, n);
	return ret;
}
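/*
 * Illustrative use (hypothetical): fetching a NUL-terminated name from
 * user space into a fixed kernel buffer, per the return convention
 * documented above.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *	if (len < 0)
 *		return len;
 *	name[len] = '\0';
 */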
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern long __strlen_user(const char *);

extern inline long strlen_user(const char *str)
{
	return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
}
/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
 * a value greater than N if the limit would be exceeded, else strlen.  */
extern long __strnlen_user(const char *, long);

extern inline long strnlen_user(const char *str, long n)
{
	return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
}
/*
 * About the exception table:
 *
 * - insn is a 32-bit offset off of the kernel's or module's gp.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values with a single instruction:
 *
 *	lda valreg, nextinsn(errreg)
 */
struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};
/* Returns 0 if exception not found and fixup.unit otherwise.  */
extern unsigned search_exception_table(unsigned long, unsigned long);

/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc)		\
({								\
	union exception_fixup __fie_fixup;			\
	__fie_fixup.unit = fixup_unit;				\
	if (__fie_fixup.bits.valreg != 31)			\
		map_reg(__fie_fixup.bits.valreg) = 0;		\
	if (__fie_fixup.bits.errreg != 31)			\
		map_reg(__fie_fixup.bits.errreg) = -EFAULT;	\
	(pc) + __fie_fixup.bits.nextinsn;			\
})
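/*
 * Illustrative sketch of a trap-handler fragment (hypothetical names:
 * "regs" and the reg() register accessor): the fixup word found by
 * search_exception_table() zeroes valreg, puts -EFAULT in errreg, and
 * yields the pc at which to resume.
 *
 *	unsigned fixup = search_exception_table(regs->pc, regs->gp);
 *	if (fixup != 0)
 *		regs->pc = fixup_exception(reg, fixup, regs->pc);
 */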
#endif /* __ALPHA_UACCESS_H */