/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/errno.h>
#include <linux/thread_info.h>

#define STR(x)		__STR(x)
#define __STR(x)	#x
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define KERNEL_DS	((mm_segment_t) { (unsigned long) 0L })
#define USER_DS		((mm_segment_t) { (unsigned long) -1L })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
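
/*
 * Editor's sketch (not part of the original header; the function name
 * is invented, shown for illustration only): the classic use of the
 * macros above.  Widening the address limit to KERNEL_DS lets code
 * that normally validates user pointers operate on a kernel buffer;
 * the old limit must be restored on every path.
 */
#if 0
static ssize_t example_kernel_read(struct file *file, char *kbuf,
				   size_t count, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();		/* save current limit */
	ssize_t ret;

	set_fs(KERNEL_DS);			/* bypass user-pointer checks */
	ret = file->f_op->read(file, kbuf, count, pos);
	set_fs(old_fs);				/* always restore */

	return ret;
}
#endif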
/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __ua_size(size) \
	(__builtin_constant_p(size) && (signed long) (size) > 0 ? 0 : (size))

#define __access_ok(addr,size,mask) \
	(((signed long)((mask) & ((addr) | ((addr) + (size)) | __ua_size(size)))) >= 0)

#define __access_mask ((long)(get_fs().seg))

#define access_ok(type,addr,size) \
	__access_ok(((unsigned long)(addr)),(size),__access_mask)

static inline int verify_area(int type, const void *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}
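
/*
 * Editor's worked example (illustration only).  Under USER_DS the mask
 * is -1L, so the OR of addr, addr+size and __ua_size(size) passes
 * through the AND unchanged and the signed comparison fails exactly
 * when a high bit is set.  For addr = 0x00400000, size = 16:
 *
 *	-1 & (0x00400000 | 0x00400010 | 0) = 0x00400010 >= 0	-> valid
 *
 * whereas a kernel pointer such as 0x80001000 leaves the sign bit set
 * (0x80001010 < 0 as a signed long on 32-bit) and is rejected.  Under
 * KERNEL_DS the mask is 0, the AND clears everything, and 0 >= 0
 * always passes.
 */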
/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As MIPS uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) \
	__get_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
	__get_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
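
/*
 * Editor's sketch (invented function name, illustration only): typical
 * use of the checking and non-checking variants.  get_user() performs
 * its own access_ok() test; the __get_user() form is for loops where
 * one access_ok() call up front covers the whole range.
 */
#if 0
static int example_sum_words(int *uptr, int n, int *sum)
{
	int i, val, total = 0;

	if (!access_ok(VERIFY_READ, uptr, n * sizeof(int)))
		return -EFAULT;

	for (i = 0; i < n; i++) {
		if (__get_user(val, uptr + i))	/* no per-access check */
			return -EFAULT;
		total += val;
	}
	*sum = total;
	return 0;
}
#endif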
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef __mips64
#define __GET_USER_DW __get_user_asm("ld")
#else
#define __GET_USER_DW __get_user_asm_ll32
#endif
#define __get_user_nocheck(x,ptr,size) ({ \
	long __gu_err; \
	__typeof__(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); \
	__gu_addr = (long) (ptr); \
	__asm__("":"=r" (__gu_err)); \
	switch (size) { \
	case 1: __get_user_asm("lb"); break; \
	case 2: __get_user_asm("lh"); break; \
	case 4: __get_user_asm("lw"); break; \
	case 8: __GET_USER_DW; break; \
	default: __get_user_unknown(); break; \
	} \
	x = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; })

#define __get_user_check(x,ptr,size) ({ \
	long __gu_err = -EFAULT;	/* fail unless the access succeeds */ \
	__typeof__(*(ptr)) __gu_val = 0;	/* don't leak stack garbage */ \
	long __gu_addr; \
	__gu_addr = (long) (ptr); \
	if (__access_ok(__gu_addr,size,__access_mask)) { \
		switch (size) { \
		case 1: __get_user_asm("lb"); break; \
		case 2: __get_user_asm("lh"); break; \
		case 4: __get_user_asm("lw"); break; \
		case 8: __GET_USER_DW; break; \
		default: __get_user_unknown(); break; \
		} \
	} \
	x = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; })
#define __get_user_asm(insn) \
({ \
	__asm__ __volatile__( \
	"1:\t" insn "\t%1,%2\n\t" \
	"move\t%0,$0\n" \
	"2:\n\t" \
	".section\t.fixup,\"ax\"\n" \
	"3:\tli\t%0,%3\n\t" \
	"move\t%1,$0\n\t" \
	"j\t2b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,3b\n\t" \
	".previous" \
	:"=r" (__gu_err), "=r" (__gu_val) \
	:"o" (__m(__gu_addr)), "i" (-EFAULT)); })
/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_asm_ll32 \
({ \
	__asm__ __volatile__( \
	"1:\tlw\t%1,%2\n" \
	"2:\tlw\t%D1,%3\n\t" \
	"move\t%0,$0\n" \
	"3:\t.section\t.fixup,\"ax\"\n" \
	"4:\tli\t%0,%4\n\t" \
	"move\t%1,$0\n\t" \
	"move\t%D1,$0\n\t" \
	"j\t3b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,4b\n\t" \
	".word\t2b,4b\n\t" \
	".previous" \
	:"=r" (__gu_err), "=&r" (__gu_val) \
	:"o" (__m(__gu_addr)), "o" (__m(__gu_addr + 4)), \
	 "i" (-EFAULT)); })

extern void __get_user_unknown(void);
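
/*
 * Editor's note: __get_user_unknown() (and __put_user_unknown() below)
 * is deliberately declared but never defined.  If get_user()/put_user()
 * is applied to an object whose size is not 1, 2, 4 or 8 bytes, the
 * call survives to link time and the build fails with an unresolved
 * symbol, turning a silent runtime problem into a build error.
 */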
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef __mips64
#define __PUT_USER_DW __put_user_asm("sd")
#else
#define __PUT_USER_DW __put_user_asm_ll32
#endif
#define __put_user_nocheck(x,ptr,size) ({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	__asm__("":"=r" (__pu_err)); \
	switch (size) { \
	case 1: __put_user_asm("sb"); break; \
	case 2: __put_user_asm("sh"); break; \
	case 4: __put_user_asm("sw"); break; \
	case 8: __PUT_USER_DW; break; \
	default: __put_user_unknown(); break; \
	} \
	__pu_err; })

#define __put_user_check(x,ptr,size) ({ \
	long __pu_err = -EFAULT;	/* fail unless the access succeeds */ \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	if (__access_ok(__pu_addr,size,__access_mask)) { \
		switch (size) { \
		case 1: __put_user_asm("sb"); break; \
		case 2: __put_user_asm("sh"); break; \
		case 4: __put_user_asm("sw"); break; \
		case 8: __PUT_USER_DW; break; \
		default: __put_user_unknown(); break; \
		} \
	} \
	__pu_err; })
#define __put_user_asm(insn) \
({ \
	__asm__ __volatile__( \
	"1:\t" insn "\t%z1, %2\t\t\t# __put_user_asm\n\t" \
	"move\t%0, $0\n" \
	"2:\n\t" \
	".section\t.fixup,\"ax\"\n" \
	"3:\tli\t%0,%3\n\t" \
	"j\t2b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,3b\n\t" \
	".previous" \
	:"=r" (__pu_err) \
	:"Jr" (__pu_val), "o" (__m(__pu_addr)), "i" (-EFAULT)); })
#define __put_user_asm_ll32 \
({ \
	__asm__ __volatile__( \
	"1:\tsw\t%1, %2\t\t\t# __put_user_asm_ll32\n\t" \
	"2:\tsw\t%D1, %3\n" \
	"move\t%0, $0\n" \
	"3:\n\t" \
	".section\t.fixup,\"ax\"\n" \
	"4:\tli\t%0,%4\n\t" \
	"j\t3b\n\t" \
	".previous\n\t" \
	".section\t__ex_table,\"a\"\n\t" \
	".word\t1b,4b\n\t" \
	".word\t2b,4b\n\t" \
	".previous" \
	:"=r" (__pu_err) \
	:"r" (__pu_val), "o" (__m(__pu_addr)), "o" (__m(__pu_addr + 4)), \
	 "i" (-EFAULT)); })

extern void __put_user_unknown(void);
/*
 * We're generating jumps to subroutines which may be outside the range
 * of the jump instruction, so in a module we load the target address
 * into a register and use jalr instead of a plain jal.
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
	".set\tnoat\n\t" \
	"la\t$1, " #destination "\n\t" \
	"jalr\t$1\n\t" \
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
	"jal\t" #destination "\n\t"
#endif
extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to,from,n) ({ \
	register void *__cu_to_r __asm__ ("$4"); \
	register const void *__cu_from_r __asm__ ("$5"); \
	register long __cu_len_r __asm__ ("$6"); \
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	__MODULE_JAL(__copy_user) \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
	  "memory"); \
	__cu_len_r; \
})
#define __copy_to_user(to,from,n) ({ \
	void *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
	__cu_len; \
})
#define copy_to_user(to,from,n) ({ \
	void *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) \
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
						 __cu_len); \
	__cu_len; \
})
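
/*
 * Editor's sketch (invented names, illustration only): a read()-style
 * handler returning data to user space.  copy_to_user() returns the
 * number of bytes that could NOT be copied, so 0 means success.
 */
#if 0
static ssize_t example_read(char *ubuf, size_t count)
{
	static const char msg[] = "hello\n";

	if (count > sizeof(msg))
		count = sizeof(msg);
	if (copy_to_user(ubuf, msg, count))
		return -EFAULT;		/* partial or no copy */
	return count;
}
#endif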
#define __invoke_copy_from_user(to,from,n) ({ \
	register void *__cu_to_r __asm__ ("$4"); \
	register const void *__cu_from_r __asm__ ("$5"); \
	register long __cu_len_r __asm__ ("$6"); \
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user) \
	".set\tnoat\n\t" \
	"addu\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder\n\t" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
	  "memory"); \
	__cu_len_r; \
})
#define __copy_from_user(to,from,n) ({ \
	void *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
					   __cu_len); \
	__cu_len; \
})
#define copy_from_user(to,from,n) ({ \
	void *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) \
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
						   __cu_len); \
	__cu_len; \
})
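
/*
 * Editor's sketch (invented names, illustration only): a write()-style
 * handler pulling data in from user space.  As above, a non-zero
 * return from copy_from_user() means some bytes were left uncopied.
 */
#if 0
static int example_write(void *kbuf, const char *ubuf, size_t count)
{
	if (count > PAGE_SIZE)
		return -EINVAL;		/* bound the kernel buffer */
	if (copy_from_user(kbuf, ubuf, count))
		return -EFAULT;		/* faulted mid-copy */
	return 0;
}
#endif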
static inline __kernel_size_t
__clear_user(void *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", "$8", "$9", "$31");

	return res;
}
#define clear_user(addr,n) ({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && access_ok(VERIFY_WRITE, ((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })
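
/*
 * Editor's sketch (invented name, illustration only): zeroing the tail
 * of a user buffer, e.g. padding after a partially filled record.
 * clear_user() returns the number of bytes that could not be cleared.
 */
#if 0
static int example_pad(char *ubuf, size_t len, size_t size)
{
	if (len < size && clear_user(ubuf + len, size - len))
		return -EFAULT;		/* fault while zeroing */
	return 0;
}
#endif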
/*
 * Returns: -EFAULT if exception before terminator, N if the entire
 * buffer filled, else strlen.
 */
static inline long
__strncpy_from_user(char *__to, const char *__from, long __len)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");

	return res;
}
static inline long
strncpy_from_user(char *__to, const char *__from, long __len)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");

	return res;
}
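
/*
 * Editor's sketch (invented names, illustration only): fetching a
 * NUL-terminated string such as a pathname.  Per the comment above,
 * the return value is the string length on success, __len if the
 * buffer filled before a terminator was found, or -EFAULT on a fault.
 * kname is assumed to hold at least 256 bytes here.
 */
#if 0
static int example_get_name(char *kname, const char *uname)
{
	long len = strncpy_from_user(kname, uname, 255);

	if (len < 0)
		return -EFAULT;		/* faulted before terminator */
	kname[len] = '\0';		/* len == 255 means truncated */
	return 0;
}
#endif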
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char *s)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", "$8", "$31");

	return res;
}
static inline long strlen_user(const char *s)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", "$8", "$31");

	return res;
}
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char *s, long n)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", "$8", "$31");

	return res;
}
static inline long strnlen_user(const char *s, long n)
{
	long res;

	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", "$8", "$31");

	return res;
}
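
/*
 * Editor's sketch (illustration only): sizing an untrusted user string
 * before copying it.  Both strlen_user() and strnlen_user() return the
 * length INCLUDING the terminating NUL, and 0 on a bad address, so the
 * bounded form is the safe way to validate such a string.  The 4096
 * bound below is an arbitrary choice for the example.
 */
#if 0
static long example_user_strsize(const char *ustr)
{
	long n = strnlen_user(ustr, 4096);	/* arbitrary upper bound */

	if (n == 0)
		return -EFAULT;		/* bad pointer */
	return n;			/* length + 1, capped at bound */
}
#endif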
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};
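
/*
 * Editor's note: each 1:/2: label in the asm above, paired with a
 * .word entry in __ex_table, produces one of these records: "insn" is
 * the address of the instruction that may fault and "nextinsn" the
 * address of its .fixup handler.  A minimal sketch of the lookup the
 * trap handler performs (the real kernel keeps the table sorted and
 * uses binary search; a linear scan is shown for clarity):
 */
#if 0
static unsigned long example_search_exception_table(
	const struct exception_table_entry *first,
	const struct exception_table_entry *last,
	unsigned long faulting_pc)
{
	const struct exception_table_entry *e;

	for (e = first; e <= last; e++)
		if (e->insn == faulting_pc)
			return e->nextinsn;	/* resume in .fixup code */
	return 0;				/* genuine bad access: oops */
}
#endif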

#endif /* _ASM_UACCESS_H */