/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

#define STR(x) __STR(x)
#define __STR(x) #x

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define KERNEL_DS ((mm_segment_t) { (unsigned long) 0L })
#define USER_DS   ((mm_segment_t) { (unsigned long) -1L })

#define VERIFY_READ  0
#define VERIFY_WRITE 1

#define get_fs()  (current->thread.current_ds)
#define get_ds()  (KERNEL_DS)
#define set_fs(x) (current->thread.current_ds = (x))

#define segment_eq(a,b) ((a).seg == (b).seg)

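/*
 * Usage sketch (illustrative only, not part of this header): kernel code
 * that wants to pass a kernel buffer to a routine expecting a user
 * pointer can widen the checking mask temporarily.  The callee name
 * "do_transfer" is hypothetical.
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      err = do_transfer(kernel_buf, len);
 *      set_fs(old_fs);                         (always restore)
 */
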
/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * The address is valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr,size,mask) \
        (((__signed__ long)((mask) & ((addr) | (size) | ((addr) + (size))))) >= 0)

#define __access_mask ((long)(get_fs().seg))

#define access_ok(type,addr,size) \
        __access_ok(((unsigned long)(addr)), (size), __access_mask)

extern inline int verify_area(int type, const void *addr, unsigned long size)
{
        return access_ok(type, addr, size) ? 0 : -EFAULT;
}

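/*
 * Usage sketch (illustrative only): validating a user pointer once before
 * touching it.  "uptr" and "count" are hypothetical.
 *
 *      if (!access_ok(VERIFY_WRITE, uptr, count))
 *              return -EFAULT;
 *
 * or, through the older wrapper:
 *
 *      int err = verify_area(VERIFY_READ, uptr, count);
 *      if (err)
 *              return err;
 */
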
/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As MIPS uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x,ptr) \
        __get_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

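/*
 * Usage sketch (illustrative only): single-value transfers in a
 * hypothetical ioctl handler.  Both macros evaluate to 0 on success and
 * -EFAULT on a bad address.
 *
 *      int value;
 *
 *      if (get_user(value, (int *) arg))
 *              return -EFAULT;
 *      if (put_user(value * 2, (int *) arg))
 *              return -EFAULT;
 */
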
/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x,ptr) \
        __get_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

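/*
 * Usage sketch (illustrative only): one access_ok() check covering a run
 * of unchecked accesses to the same user array.  All names ("uarray",
 * "karray", "err") are hypothetical.
 *
 *      if (!access_ok(VERIFY_READ, uarray, 4 * sizeof(int)))
 *              return -EFAULT;
 *      for (i = 0; i < 4; i++)
 *              err |= __get_user(karray[i], uarray + i);
 */
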
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef __mips64
#define __GET_USER_DW __get_user_asm("ld")
#else
#define __GET_USER_DW __get_user_asm_ll32
#endif

#define __get_user_nocheck(x,ptr,size) ({ \
        long __gu_err; \
        __typeof__(*(ptr)) __gu_val; \
        long __gu_addr; \
        __asm__("":"=r" (__gu_val)); \
        __gu_addr = (long) (ptr); \
        __asm__("":"=r" (__gu_err)); \
        switch (size) { \
        case 1: __get_user_asm("lb"); break; \
        case 2: __get_user_asm("lh"); break; \
        case 4: __get_user_asm("lw"); break; \
        case 8: __GET_USER_DW; break; \
        default: __get_user_unknown(); break; \
        } \
        x = (__typeof__(*(ptr))) __gu_val; \
        __gu_err; \
})

#define __get_user_check(x,ptr,size) ({ \
        long __gu_err = -EFAULT; \
        __typeof__(*(ptr)) __gu_val; \
        long __gu_addr; \
        __asm__("":"=r" (__gu_val)); \
        __gu_addr = (long) (ptr); \
        if (__access_ok(__gu_addr, size, __access_mask)) { \
                switch (size) { \
                case 1: __get_user_asm("lb"); break; \
                case 2: __get_user_asm("lh"); break; \
                case 4: __get_user_asm("lw"); break; \
                case 8: __GET_USER_DW; break; \
                default: __get_user_unknown(); break; \
                } \
        } \
        x = (__typeof__(*(ptr))) __gu_val; \
        __gu_err; \
})

#define __get_user_asm(insn) \
({ \
        __asm__ __volatile__( \
                "1:\t" insn "\t%1,%2\n\t" \
                "move\t%0,$0\n" \
                "2:\n\t" \
                ".section\t.fixup,\"ax\"\n" \
                "3:\tli\t%0,%3\n\t" \
                "move\t%1,$0\n\t" \
                "j\t2b\n\t" \
                ".previous\n\t" \
                ".section\t__ex_table,\"a\"\n\t" \
                ".word\t1b,3b\n\t" \
                ".previous" \
                : "=r" (__gu_err), "=r" (__gu_val) \
                : "o" (__m(__gu_addr)), "i" (-EFAULT)); \
})

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_asm_ll32 \
({ \
        __asm__ __volatile__( \
                "1:\tlw\t%1,%2\n" \
                "2:\tlw\t%D1,%3\n\t" \
                "move\t%0,$0\n" \
                "3:\t.section\t.fixup,\"ax\"\n" \
                "4:\tli\t%0,%4\n\t" \
                "move\t%1,$0\n\t" \
                "move\t%D1,$0\n\t" \
                "j\t3b\n\t" \
                ".previous\n\t" \
                ".section\t__ex_table,\"a\"\n\t" \
                ".word\t1b,4b\n\t" \
                ".word\t2b,4b\n\t" \
                ".previous" \
                : "=r" (__gu_err), "=&r" (__gu_val) \
                : "o" (__m(__gu_addr)), "o" (__m(__gu_addr + 4)), \
                  "i" (-EFAULT)); \
})

extern void __get_user_unknown(void);

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef __mips64
#define __PUT_USER_DW __put_user_asm("sd")
#else
#define __PUT_USER_DW __put_user_asm_ll32
#endif

#define __put_user_nocheck(x,ptr,size) ({ \
        long __pu_err; \
        __typeof__(*(ptr)) __pu_val; \
        long __pu_addr; \
        __pu_val = (x); \
        __pu_addr = (long) (ptr); \
        __asm__("":"=r" (__pu_err)); \
        switch (size) { \
        case 1: __put_user_asm("sb"); break; \
        case 2: __put_user_asm("sh"); break; \
        case 4: __put_user_asm("sw"); break; \
        case 8: __PUT_USER_DW; break; \
        default: __put_user_unknown(); break; \
        } \
        __pu_err; \
})

#define __put_user_check(x,ptr,size) ({ \
        long __pu_err = -EFAULT; \
        __typeof__(*(ptr)) __pu_val; \
        long __pu_addr; \
        __pu_val = (x); \
        __pu_addr = (long) (ptr); \
        if (__access_ok(__pu_addr, size, __access_mask)) { \
                switch (size) { \
                case 1: __put_user_asm("sb"); break; \
                case 2: __put_user_asm("sh"); break; \
                case 4: __put_user_asm("sw"); break; \
                case 8: __PUT_USER_DW; break; \
                default: __put_user_unknown(); break; \
                } \
        } \
        __pu_err; \
})

#define __put_user_asm(insn) \
({ \
        __asm__ __volatile__( \
                "1:\t" insn "\t%1,%2\n\t" \
                "move\t%0,$0\n" \
                "2:\n\t" \
                ".section\t.fixup,\"ax\"\n" \
                "3:\tli\t%0,%3\n\t" \
                "j\t2b\n\t" \
                ".previous\n\t" \
                ".section\t__ex_table,\"a\"\n\t" \
                ".word\t1b,3b\n\t" \
                ".previous" \
                : "=r" (__pu_err) \
                : "r" (__pu_val), "o" (__m(__pu_addr)), "i" (-EFAULT)); \
})

#define __put_user_asm_ll32 \
({ \
        __asm__ __volatile__( \
                "1:\tsw\t%1,%2\n\t" \
                "2:\tsw\t%D1,%3\n" \
                "move\t%0,$0\n" \
                "3:\n\t" \
                ".section\t.fixup,\"ax\"\n" \
                "4:\tli\t%0,%4\n\t" \
                "j\t3b\n\t" \
                ".previous\n\t" \
                ".section\t__ex_table,\"a\"\n\t" \
                ".word\t1b,4b\n\t" \
                ".word\t2b,4b\n\t" \
                ".previous" \
                : "=r" (__pu_err) \
                : "r" (__pu_val), "o" (__m(__pu_addr)), "o" (__m(__pu_addr + 4)), \
                  "i" (-EFAULT)); \
})

extern void __put_user_unknown(void);

/*
 * We're generating jumps to subroutines which may be outside the range
 * of the jal instruction, so in modules we load the target address into
 * a register and use jalr instead.
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
        ".set\tnoat\n\t" \
        "la\t$1, " #destination "\n\t" \
        "jalr\t$1\n\t" \
        ".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
        "jal\t" #destination "\n\t"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __copy_to_user(to,from,n) ({ \
        void *__cu_to; \
        const void *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        __asm__ __volatile__( \
                "move\t$4, %1\n\t" \
                "move\t$5, %2\n\t" \
                "move\t$6, %3\n\t" \
                __MODULE_JAL(__copy_user) \
                "move\t%0, $6" \
                : "=r" (__cu_len) \
                : "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
                : "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", "$15", \
                  "$24", "$31", "memory"); \
        __cu_len; \
})

#define __copy_from_user(to,from,n) ({ \
        void *__cu_to; \
        const void *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        __asm__ __volatile__( \
                "move\t$4, %1\n\t" \
                "move\t$5, %2\n\t" \
                "move\t$6, %3\n\t" \
                ".set\tnoreorder\n\t" \
                __MODULE_JAL(__copy_user) \
                ".set\tnoat\n\t" \
                "addu\t$1, %2, %3\n\t" \
                ".set\tat\n\t" \
                ".set\treorder\n\t" \
                "move\t%0, $6" \
                : "=r" (__cu_len) \
                : "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
                : "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", "$15", \
                  "$24", "$31", "memory"); \
        __cu_len; \
})

#define copy_to_user(to,from,n) ({ \
        void *__cu_to; \
        const void *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) \
                __asm__ __volatile__( \
                        "move\t$4, %1\n\t" \
                        "move\t$5, %2\n\t" \
                        "move\t$6, %3\n\t" \
                        __MODULE_JAL(__copy_user) \
                        "move\t%0, $6" \
                        : "=r" (__cu_len) \
                        : "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
                        : "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", \
                          "$15", "$24", "$31", "memory"); \
        __cu_len; \
})

#define copy_from_user(to,from,n) ({ \
        void *__cu_to; \
        const void *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        if (access_ok(VERIFY_READ, __cu_from, __cu_len)) \
                __asm__ __volatile__( \
                        "move\t$4, %1\n\t" \
                        "move\t$5, %2\n\t" \
                        "move\t$6, %3\n\t" \
                        ".set\tnoreorder\n\t" \
                        __MODULE_JAL(__copy_user) \
                        ".set\tnoat\n\t" \
                        "addu\t$1, %2, %3\n\t" \
                        ".set\tat\n\t" \
                        ".set\treorder\n\t" \
                        "move\t%0, $6" \
                        : "=r" (__cu_len) \
                        : "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
                        : "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", \
                          "$15", "$24", "$31", "memory"); \
        __cu_len; \
})

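/*
 * Usage sketch (illustrative only): bulk transfers in a hypothetical
 * read/write handler.  Both macros return the number of bytes that could
 * NOT be copied, so any nonzero result means -EFAULT.  The type
 * "struct demo_args" and the pointer "uarg" are hypothetical.
 *
 *      struct demo_args args;
 *
 *      if (copy_from_user(&args, uarg, sizeof(args)))
 *              return -EFAULT;
 *      ...
 *      if (copy_to_user(uarg, &args, sizeof(args)))
 *              return -EFAULT;
 */
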
extern inline __kernel_size_t
__clear_user(void *addr, __kernel_size_t size)
{
        __kernel_size_t res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, $0\n\t"
                "move\t$6, %2\n\t"
                __MODULE_JAL(__bzero)
                "move\t%0, $6"
                : "=r" (res)
                : "r" (addr), "r" (size)
                : "$4", "$5", "$6", "$8", "$9", "$31");

        return res;
}

#define clear_user(addr,n) ({ \
        void *__cl_addr = (addr); \
        unsigned long __cl_size = (n); \
        if (__cl_size && access_ok(VERIFY_WRITE, \
                        ((unsigned long)(__cl_addr)), __cl_size)) \
                __cl_size = __clear_user(__cl_addr, __cl_size); \
        __cl_size; })

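/*
 * Usage sketch (illustrative only): zero-filling the tail of a user
 * buffer, e.g. after a short read.  Like the copy routines, the result
 * is the number of bytes left untouched.  "ubuf", "done" and "len" are
 * hypothetical.
 *
 *      if (clear_user(ubuf + done, len - done))
 *              return -EFAULT;
 */
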
/*
 * Returns: -EFAULT if exception before terminator, N if the entire
 * buffer filled, else strlen.
 */
extern inline long
__strncpy_from_user(char *__to, const char *__from, long __len)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                "move\t$6, %3\n\t"
                __MODULE_JAL(__strncpy_from_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (__to), "r" (__from), "r" (__len)
                : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");

        return res;
}

extern inline long
strncpy_from_user(char *__to, const char *__from, long __len)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                "move\t$6, %3\n\t"
                __MODULE_JAL(__strncpy_from_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (__to), "r" (__from), "r" (__len)
                : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");

        return res;
}

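/*
 * Usage sketch (illustrative only): fetching a NUL-terminated string,
 * e.g. a pathname, into a kernel buffer.  A negative result means a
 * fault; otherwise it is the number of characters copied (excluding the
 * terminator).  "uname" is hypothetical.
 *
 *      char name[64];
 *      long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *      if (len < 0)
 *              return -EFAULT;
 *      name[len] = '\0';
 */
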
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern inline long __strlen_user(const char *s)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                __MODULE_JAL(__strlen_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s)
                : "$2", "$4", "$8", "$31");

        return res;
}

extern inline long strlen_user(const char *s)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                __MODULE_JAL(__strlen_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s)
                : "$2", "$4", "$8", "$31");

        return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern inline long __strnlen_user(const char *s, long n)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                __MODULE_JAL(__strnlen_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s), "r" (n)
                : "$2", "$4", "$5", "$8", "$31");

        return res;
}

extern inline long strnlen_user(const char *s, long n)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                __MODULE_JAL(__strnlen_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s), "r" (n)
                : "$2", "$4", "$5", "$8", "$31");

        return res;
}

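/*
 * Usage sketch (illustrative only): sizing a user string before copying
 * it.  Per the comments above, the result is the string length INCLUDING
 * the terminator, or 0 on a fault, so it can be fed to an allocator.
 * "ustr" is hypothetical; PAGE_SIZE is just an example bound.
 *
 *      long size = strnlen_user(ustr, PAGE_SIZE);
 *
 *      if (size == 0 || size > PAGE_SIZE)
 *              return -EFAULT;
 */
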
struct exception_table_entry
{
        unsigned long insn;
        unsigned long nextinsn;
};

/* Returns 0 if exception not found and fixup.unit otherwise.  */
extern unsigned long search_exception_table(unsigned long addr);

/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc) \
({ \
        fixup_unit; \
})

#endif /* _ASM_UACCESS_H */