/* include/asm-alpha/uaccess.h (from davej-history.git) */
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs()  (current->thread.fs)
#define get_ds()  (KERNEL_DS)
#define set_fs(x) (current->thread.fs = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr,size,segment) \
	(((segment).seg & (addr | size | (addr+size))) == 0)

#define access_ok(type,addr,size) \
	__access_ok(((unsigned long)(addr)),(size),get_fs())

extern inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
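
#if 0
/*
 * Illustrative sketch (hypothetical function, not part of the original
 * header): how the mask check above behaves.  With USER_DS.seg ==
 * -0x40000000000UL (bits 42..63 set), a low user address passes and a
 * kernel-segment address fails; with KERNEL_DS.seg == 0 the AND masks
 * everything, so every range passes.
 */
static int example_check(void)
{
	int ok_user  = __access_ok(0x1200000000UL, 0x100, USER_DS);       /* 1 */
	int bad_kseg = __access_ok(0xfffffc0000000000UL, 0x100, USER_DS); /* 0 */
	return ok_user && !bad_kseg;
}
#endif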
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
#define get_user(x,ptr) \
  __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
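
#if 0
/*
 * Illustrative sketch (hypothetical function): typical single-value
 * use.  get_user() yields 0 or -EFAULT and, via the exception fixup,
 * zeroes the destination on a fault; put_user() likewise yields 0 or
 * -EFAULT without writing on a fault.
 */
static long example_bump(int *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;
	return put_user(val + 1, uptr);
}
#endif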
72 * The "__xxx" versions do not do address space checking, useful when
73 * doing multiple accesses to the same area (the programmer has to do the
74 * checks by hand with "access_ok()")
76 #define __put_user(x,ptr) \
77 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
78 #define __get_user(x,ptr) \
79 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
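
#if 0
/*
 * Illustrative sketch (hypothetical names): one access_ok() check
 * amortized over several unchecked accesses to the same user area,
 * which is the intended use of the "__xxx" variants.
 */
struct example_pair { long a, b; };

static long example_sum_pair(struct example_pair *upair, long *sum)
{
	long a, b;

	if (!access_ok(VERIFY_READ, upair, sizeof(*upair)))
		return -EFAULT;
	if (__get_user(a, &upair->a) | __get_user(b, &upair->b))
		return -EFAULT;
	*sum = a + b;
	return 0;
}
#endif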
82 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
83 * encode the bits we need for resolving the exception. See the
84 * more extensive comments with fixup_inline_exception below for
85 * more information.
88 extern void __get_user_unknown(void);
#define __get_user_nocheck(x,ptr,size)			\
({							\
	long __gu_err = 0, __gu_val;			\
	switch (size) {					\
	  case 1: __get_user_8(ptr); break;		\
	  case 2: __get_user_16(ptr); break;		\
	  case 4: __get_user_32(ptr); break;		\
	  case 8: __get_user_64(ptr); break;		\
	  default: __get_user_unknown(); break;		\
	}						\
	(x) = (__typeof__(*(ptr))) __gu_val;		\
	__gu_err;					\
})
#define __get_user_check(x,ptr,size,segment)			\
({								\
	long __gu_err = -EFAULT, __gu_val = 0;			\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);		\
	if (__access_ok((long)__gu_addr,size,segment)) {	\
		__gu_err = 0;					\
		switch (size) {					\
		  case 1: __get_user_8(__gu_addr); break;	\
		  case 2: __get_user_16(__gu_addr); break;	\
		  case 4: __get_user_32(__gu_addr); break;	\
		  case 8: __get_user_64(__gu_addr); break;	\
		  default: __get_user_unknown(); break;		\
		}						\
	}							\
	(x) = (__typeof__(*(ptr))) __gu_val;			\
	__gu_err;						\
})
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.gprel32 1b\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.gprel32 1b\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.gprel32 1b\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.gprel32 1b\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */

#define __get_user_16(addr)					\
{								\
	long __gu_tmp;						\
	__asm__("1: ldq_u %0,0(%3)\n"				\
	"2:	ldq_u %1,1(%3)\n"				\
	"	extwl %0,%3,%0\n"				\
	"	extwh %1,%3,%1\n"				\
	"	or %0,%1,%0\n"					\
	"3:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda %0, 3b-1b(%2)\n"				\
	"	.gprel32 2b\n"					\
	"	lda %0, 3b-2b(%2)\n"				\
	".previous"						\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));			\
}
#define __get_user_8(addr)				\
	__asm__("1: ldq_u %0,0(%2)\n"			\
	"	extbl %0,%2,%0\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.gprel32 1b\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=&r"(__gu_val), "=r"(__gu_err)	\
		: "r"(addr), "1"(__gu_err))
#endif
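
#if 0
/*
 * Illustrative C rendering (hypothetical, for exposition only) of the
 * pre-BWX unaligned halfword load above: ldq_u fetches the aligned
 * quadwords covering the halfword, extwl/extwh extract its low and
 * high parts relative to the byte offset, and or merges them.
 */
static unsigned long example_ldwu(unsigned long addr)
{
	unsigned long off = addr & 7;
	unsigned long q0 = *(unsigned long *)(addr & ~7UL);        /* ldq_u 0(addr) */
	unsigned long q1 = *(unsigned long *)((addr + 1) & ~7UL);  /* ldq_u 1(addr) */
	unsigned long lo = (q0 >> (off * 8)) & 0xffff;             /* extwl */
	unsigned long hi = (q1 << ((64 - off * 8) & 63)) & 0xffff; /* extwh */
	return lo | hi;
}
#endif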
extern void __put_user_unknown(void);

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err = 0;				\
	switch (size) {					\
	  case 1: __put_user_8(x,ptr); break;		\
	  case 2: __put_user_16(x,ptr); break;		\
	  case 4: __put_user_32(x,ptr); break;		\
	  case 8: __put_user_64(x,ptr); break;		\
	  default: __put_user_unknown(); break;		\
	}						\
	__pu_err;					\
})
#define __put_user_check(x,ptr,size,segment)			\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	if (__access_ok((long)__pu_addr,size,segment)) {	\
		__pu_err = 0;					\
		switch (size) {					\
		  case 1: __put_user_8(x,__pu_addr); break;	\
		  case 2: __put_user_16(x,__pu_addr); break;	\
		  case 4: __put_user_32(x,__pu_addr); break;	\
		  case 8: __put_user_64(x,__pu_addr); break;	\
		  default: __put_user_unknown(); break;		\
		}						\
	}							\
	__pu_err;						\
})
235 * The "__put_user_xx()" macros tell gcc they read from memory
236 * instead of writing: this is because they do not write to
237 * any memory gcc knows about, so there are no aliasing issues
239 #define __put_user_64(x,addr) \
240 __asm__ __volatile__("1: stq %r2,%1\n" \
241 "2:\n" \
242 ".section __ex_table,\"a\"\n" \
243 " .gprel32 1b\n" \
244 " lda $31,2b-1b(%0)\n" \
245 ".previous" \
246 : "=r"(__pu_err) \
247 : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
249 #define __put_user_32(x,addr) \
250 __asm__ __volatile__("1: stl %r2,%1\n" \
251 "2:\n" \
252 ".section __ex_table,\"a\"\n" \
253 " .gprel32 1b\n" \
254 " lda $31,2b-1b(%0)\n" \
255 ".previous" \
256 : "=r"(__pu_err) \
257 : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x,addr)					\
__asm__ __volatile__("1: stw %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x,addr)					\
__asm__ __volatile__("1: stb %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */

#define __put_user_16(x,addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1:	ldq_u %2,1(%5)\n"				\
	"2:	ldq_u %1,0(%5)\n"				\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3:	stq_u %2,1(%5)\n"				\
	"4:	stq_u %1,0(%5)\n"				\
	"5:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda $31, 5b-1b(%0)\n"				\
	"	.gprel32 2b\n"					\
	"	lda $31, 5b-2b(%0)\n"				\
	"	.gprel32 3b\n"					\
	"	lda $31, 5b-3b(%0)\n"				\
	"	.gprel32 4b\n"					\
	"	lda $31, 5b-4b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}
#define __put_user_8(x,addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1:	ldq_u %1,0(%4)\n"				\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2:	stq_u %1,0(%4)\n"				\
	"3:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.gprel32 1b\n"					\
	"	lda $31, 3b-1b(%0)\n"				\
	"	.gprel32 2b\n"					\
	"	lda $31, 3b-2b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif
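
#if 0
/*
 * Illustrative C rendering (hypothetical, for exposition only) of the
 * pre-BWX byte store above: a read-modify-write of the covering
 * quadword, with insbl building the shifted byte and mskbl clearing
 * the hole it lands in.
 */
static void example_stb(unsigned long addr, unsigned char val)
{
	unsigned long *q = (unsigned long *)(addr & ~7UL);   /* ldq_u/stq_u */
	unsigned long off = addr & 7;
	unsigned long ins = (unsigned long)val << (off * 8); /* insbl */
	unsigned long msk = *q & ~(0xffUL << (off * 8));     /* mskbl */
	*q = msk | ins;
}
#endif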
/*
 * Complex access routines
 */

extern void __copy_user(void);
extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
	/* This little bit of silliness is to get the GP loaded for
	   a function that ordinarily wouldn't.  Otherwise we could
	   have it done by the macro directly, which can be optimized
	   by the linker.  */
	register void * pv __asm__("$27") = __copy_user;

	register void * __cu_to __asm__("$6") = to;
	register const void * __cu_from __asm__("$7") = from;
	register long __cu_len __asm__("$0") = len;

	__asm__ __volatile__(
		"jsr $28,(%3),__copy_user\n\tldgp $29,0($28)"
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to), "=r"(pv)
		: "0" (__cu_len), "1" (__cu_from), "2" (__cu_to), "3"(pv)
		: "$1","$2","$3","$4","$5","$28","memory");

	return __cu_len;
}
extern inline long
__copy_tofrom_user(void *to, const void *from, long len, const void *validate)
{
	if (__access_ok((long)validate, len, get_fs())) {
		register void * pv __asm__("$27") = __copy_user;
		register void * __cu_to __asm__("$6") = to;
		register const void * __cu_from __asm__("$7") = from;
		register long __cu_len __asm__("$0") = len;
		__asm__ __volatile__(
			"jsr $28,(%3),__copy_user\n\tldgp $29,0($28)"
			: "=r"(__cu_len), "=r"(__cu_from), "=r"(__cu_to),
			  "=r" (pv)
			: "0" (__cu_len), "1" (__cu_from), "2" (__cu_to),
			  "3" (pv)
			: "$1","$2","$3","$4","$5","$28","memory");
		len = __cu_len;
	}
	return len;
}
#define __copy_to_user(to,from,n)   __copy_tofrom_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) __copy_tofrom_user_nocheck((to),(from),(n))
extern inline long
copy_to_user(void *to, const void *from, long n)
{
	return __copy_tofrom_user(to, from, n, to);
}

extern inline long
copy_from_user(void *to, const void *from, long n)
{
	return __copy_tofrom_user(to, from, n, from);
}
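
#if 0
/*
 * Illustrative sketch (hypothetical function): the bulk copies return
 * the number of bytes left uncopied, so zero means complete success
 * and any nonzero result is conventionally turned into -EFAULT.
 */
static long example_copy_out(void *ubuf, const void *kbuf, long n)
{
	if (copy_to_user(ubuf, kbuf, n))
		return -EFAULT;
	return n;
}
#endif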
extern void __do_clear_user(void);

extern inline long
__clear_user(void *to, long len)
{
	/* This little bit of silliness is to get the GP loaded for
	   a function that ordinarily wouldn't.  Otherwise we could
	   have it done by the macro directly, which can be optimized
	   by the linker.  */
	register void * pv __asm__("$27") = __do_clear_user;

	register void * __cl_to __asm__("$6") = to;
	register long __cl_len __asm__("$0") = len;
	__asm__ __volatile__(
		"jsr $28,(%2),__do_clear_user\n\tldgp $29,0($28)"
		: "=r"(__cl_len), "=r"(__cl_to), "=r"(pv)
		: "0"(__cl_len), "1"(__cl_to), "2"(pv)
		: "$1","$2","$3","$4","$5","$28","memory");
	return __cl_len;
}
extern inline long
clear_user(void *to, long len)
{
	if (__access_ok((long)to, len, get_fs())) {
		register void * pv __asm__("$27") = __do_clear_user;
		register void * __cl_to __asm__("$6") = to;
		register long __cl_len __asm__("$0") = len;
		__asm__ __volatile__(
			"jsr $28,(%2),__do_clear_user\n\tldgp $29,0($28)"
			: "=r"(__cl_len), "=r"(__cl_to), "=r"(pv)
			: "0"(__cl_len), "1"(__cl_to), "2"(pv)
			: "$1","$2","$3","$4","$5","$28","memory");
		len = __cl_len;
	}
	return len;
}
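
#if 0
/*
 * Illustrative sketch (hypothetical function): like the copy routines,
 * clear_user() returns the number of bytes left untouched, so zero
 * means the whole range was cleared.
 */
static long example_zero_out(void *ubuf, long n)
{
	return clear_user(ubuf, n) ? -EFAULT : 0;
}
#endif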
/* Returns: -EFAULT if exception before terminator, N if the entire
   buffer filled, else strlen.  */

extern long __strncpy_from_user(char *__to, const char *__from, long __to_len);

extern inline long
strncpy_from_user(char *to, const char *from, long n)
{
	long ret = -EFAULT;
	if (__access_ok((long)from, 0, get_fs()))
		ret = __strncpy_from_user(to, from, n);
	return ret;
}
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern long __strlen_user(const char *);

extern inline long strlen_user(const char *str)
{
	return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
}

/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
 * a value greater than N if the limit would be exceeded, else strlen.  */
extern long __strnlen_user(const char *, long);

extern inline long strnlen_user(const char *str, long n)
{
	return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
}
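
#if 0
/*
 * Illustrative sketch (hypothetical function): the string routines
 * signal a bad pointer in two different ways -- strncpy_from_user()
 * returns -EFAULT, while strnlen_user() returns 0.
 */
static long example_fetch_name(char *kbuf, const char *uname, long max)
{
	long len = strnlen_user(uname, max);

	if (len == 0 || len > max)
		return -EFAULT;		/* faulted, or name doesn't fit */
	return strncpy_from_user(kbuf, uname, max);
}
#endif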
/*
 * About the exception table:
 *
 * - insn is a 32-bit offset off of the kernel's or module's gp.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *	lda valreg, nextinsn(errreg)
 */

struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};
/* Returns 0 if exception not found and fixup.unit otherwise.  */
extern unsigned search_exception_table(unsigned long, unsigned long);

/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc)		\
({								\
	union exception_fixup __fie_fixup;			\
	__fie_fixup.unit = fixup_unit;				\
	if (__fie_fixup.bits.valreg != 31)			\
		map_reg(__fie_fixup.bits.valreg) = 0;		\
	if (__fie_fixup.bits.errreg != 31)			\
		map_reg(__fie_fixup.bits.errreg) = -EFAULT;	\
	(pc) + __fie_fixup.bits.nextinsn;			\
})
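
#if 0
/*
 * Illustrative sketch (hypothetical values, assuming GCC's usual
 * low-to-high bitfield layout on alpha): unpacking one fixup word by
 * hand.  A fixup emitted as "lda $2, 8($3)" packs nextinsn == 8,
 * errreg == 3 and valreg == 2 into the MEM-format fields, and the
 * union recovers them for fixup_exception() above.
 */
static void example_decode(unsigned fixup_unit)
{
	union exception_fixup f;

	f.unit = fixup_unit;
	/* f.bits.nextinsn == 8, f.bits.errreg == 3, f.bits.valreg == 2 */
}
#endif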

#endif /* __ALPHA_UACCESS_H */