/* $Id: uaccess.h,v 1.10 2000/03/24 13:53:45 gniibe Exp $
 *
 * User space memory access functions
 *
 * Copyright (C) 1999  Niibe Yutaka
 *
 *  Based on:
 *     MIPS implementation version 1.15 by
 *              Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *     and i386 version.
 */

#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ    0
#define VERIFY_WRITE   1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(0x80000000)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit=(x))
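
/*
 * Illustrative sketch (not part of the original header): kernel code
 * that must hand a kernel buffer to a routine expecting a user pointer
 * can widen the limit temporarily:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);		(bypass the addr_limit check)
 *	... call *_user() helpers on a kernel address ...
 *	set_fs(old_fs);			(always restore the old limit)
 */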

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))

/*
 * Uhhuh, this needs 33-bit arithmetic.  We have a carry...
 *
 * sum := addr + size;  carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 */
#define __range_ok(addr,size) ({ \
	unsigned long flag,sum; \
	__asm__("clrt; addc %3, %1; movt %0; cmp/hi %4, %1; rotcl %0" \
		:"=&r" (flag), "=r" (sum) \
		:"1" (addr), "r" ((int)(size)), "r" (current->addr_limit.seg)); \
	flag; })
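
/*
 * Roughly what the assembly above computes, written as C (illustrative
 * only; the hardware T bit supplies the 33rd "carry" bit):
 *
 *	sum  = addr + size;			addc    (may carry out)
 *	flag = carry_out;			movt
 *	T    = sum > current->addr_limit.seg;	cmp/hi  (unsigned)
 *	flag = (flag << 1) | T;			rotcl
 *
 * so __range_ok() yields 0 exactly when addr + size neither wraps
 * around nor runs past the current address limit.
 */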

#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
#define __access_ok(addr,size) (__range_ok(addr,size) == 0)

extern inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
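
/*
 * Example usage (illustrative; uarg is a hypothetical int pointer into
 * user space):
 *
 *	int val;
 *	if (get_user(val, uarg))
 *		return -EFAULT;
 *	if (put_user(val + 1, uarg))
 *		return -EFAULT;
 */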

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
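
/*
 * Illustrative pattern: when touching several fields of one user
 * structure, check the whole range once and then use the unchecked
 * variants (uptr is a hypothetical pointer to a user-space struct):
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	__get_user(a, &uptr->a);
 *	__get_user(b, &uptr->b);
 */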

/*
 * The "xxx_ret" versions return the constant specified as the third
 * argument if something bad happens.  These macros can be optimized
 * for the common case of simply returning from the function in which
 * xxx_ret is used.
 */

#define put_user_ret(x,ptr,ret) ({ \
	if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ \
	if (get_user(x,ptr)) return ret; })

#define __put_user_ret(x,ptr,ret) ({ \
	if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ \
	if (__get_user(x,ptr)) return ret; })
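
/*
 * Illustrative expansion: inside a system call,
 *
 *	put_user_ret(val, uptr, -EFAULT);
 *
 * behaves like
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * i.e. the enclosing function returns early on a fault.
 */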

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
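
/*
 * Note: casting the address to a pointer to a large struct lets the
 * "m" constraints below pass it to the asm as a memory operand, so GCC
 * treats the access as a real memory reference rather than caching the
 * value in a register.
 */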

#define __get_user_nocheck(x,ptr,size) ({ \
	long __gu_err; \
	__typeof__(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); \
	__gu_addr = (long) (ptr); \
	__asm__("":"=r" (__gu_err)); \
	switch (size) { \
	case 1: __get_user_asm("b"); break; \
	case 2: __get_user_asm("w"); break; \
	case 4: __get_user_asm("l"); break; \
	default: __get_user_unknown(); break; \
	} x = (__typeof__(*(ptr))) __gu_val; __gu_err; })

#define __get_user_check(x,ptr,size) ({ \
	long __gu_err; \
	__typeof__(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); \
	__gu_addr = (long) (ptr); \
	__asm__("":"=r" (__gu_err)); \
	if (__access_ok(__gu_addr,size)) { \
	switch (size) { \
	case 1: __get_user_asm("b"); break; \
	case 2: __get_user_asm("w"); break; \
	case 4: __get_user_asm("l"); break; \
	default: __get_user_unknown(); break; \
	} } x = (__typeof__(*(ptr))) __gu_val; __gu_err; })

#define __get_user_asm(insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%2, %1\n\t" \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov	#0, %1\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (__gu_err), "=&r" (__gu_val) \
	:"m" (__m(__gu_addr)), "i" (-EFAULT)); })

extern void __get_user_unknown(void);

#define __put_user_nocheck(x,ptr,size) ({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	__asm__("":"=r" (__pu_err)); \
	switch (size) { \
	case 1: __put_user_asm("b"); break; \
	case 2: __put_user_asm("w"); break; \
	case 4: __put_user_asm("l"); break; \
	default: __put_user_unknown(); break; \
	} __pu_err; })

#define __put_user_check(x,ptr,size) ({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	__asm__("":"=r" (__pu_err)); \
	if (__access_ok(__pu_addr,size)) { \
	switch (size) { \
	case 1: __put_user_asm("b"); break; \
	case 2: __put_user_asm("w"); break; \
	case 4: __put_user_asm("l"); break; \
	default: __put_user_unknown(); break; \
	} } __pu_err; })

#define __put_user_asm(insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%1, %2\n\t" \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	"mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (__pu_err) \
	:"r" (__pu_val), "m" (__m(__pu_addr)), "i" (-EFAULT)); })

extern void __put_user_unknown(void);

/* Generic arbitrary sized copy.  */
/* Return the number of bytes NOT copied */
/* XXX: should copy 4 bytes at a time, then the remainder. */
extern __inline__ __kernel_size_t
__copy_user(void *__to, const void *__from, __kernel_size_t __n)
{
	unsigned long __dummy, _f, _t;
	__kernel_size_t res;

	if ((res = __n))
		__asm__ __volatile__(
			"9:\n\t"
			"mov.b	@%2+, %1\n\t"
			"dt	%0\n"
			"1:\n\t"
			"mov.b	%1, @%3\n\t"
			"bf/s	9b\n\t"
			" add	#1, %3\n"
			"2:\n"
			".section .fixup,\"ax\"\n"
			"3:\n\t"
			"mov.l	5f, %1\n\t"
			"jmp	@%1\n\t"
			" add	#1, %0\n\t"
			".balign 4\n"
			"5:	.long 2b\n"
			".previous\n"
			".section __ex_table,\"a\"\n"
			"	.balign 4\n"
			"	.long 9b,2b\n"
			"	.long 1b,3b\n"
			".previous"
			: "=r" (res), "=&z" (__dummy), "=r" (_f), "=r" (_t)
			: "2" (__from), "3" (__to), "0" (res)
			: "memory");

	return res;
}

#define copy_to_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })

#define copy_to_user_ret(to,from,n,retval) ({ \
	if (copy_to_user(to,from,n)) \
		return retval; \
})

#define __copy_to_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

#define __copy_to_user_ret(to,from,n,retval) ({ \
	if (__copy_to_user(to,from,n)) \
		return retval; \
})

#define copy_from_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	void *__copy_from = (void *) (from); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })

#define copy_from_user_ret(to,from,n,retval) ({ \
	if (copy_from_user(to,from,n)) \
		return retval; \
})

#define __copy_from_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

#define __copy_from_user_ret(to,from,n,retval) ({ \
	if (__copy_from_user(to,from,n)) \
		return retval; \
})
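
/*
 * Illustrative read(2)-style usage (ubuf, kbuf and len are
 * hypothetical):
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *	return len;
 *
 * Since the helpers return the number of bytes NOT copied, any nonzero
 * result means the transfer faulted part-way through.
 */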

/* XXX: Not sure this works well...
   should do a 4-byte clear, then the rest. */
extern __inline__ __kernel_size_t
__clear_user(void *addr, __kernel_size_t size)
{
	unsigned long __a;

	__asm__ __volatile__(
		"9:\n\t"
		"dt	%0\n"
		"1:\n\t"
		"mov.b	%4, @%1\n\t"
		"bf/s	9b\n\t"
		" add	#1, %1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l	4f, %1\n\t"
		"jmp	@%1\n\t"
		" nop\n"
		".balign 4\n"
		"4:	.long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (size), "=r" (__a)
		: "0" (size), "1" (addr), "r" (0));

	return size;
}

#define clear_user(addr,n) ({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })
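
/*
 * Illustrative: clear_user() is handy for zero-padding the tail of a
 * partially filled user buffer (ubuf, copied and len are hypothetical):
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */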

extern __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __src, int __count)
{
	__kernel_size_t res;
	unsigned long __dummy, _d, _s;

	__asm__ __volatile__(
		"9:\n"
		"mov.b	@%2+, %1\n\t"
		"cmp/eq	#0, %1\n\t"
		"bt/s	2f\n"
		"1:\n"
		"mov.b	%1, @%3\n\t"
		"dt	%7\n\t"
		"bf/s	9b\n\t"
		" add	#1, %3\n\t"
		"2:\n\t"
		"sub	%7, %0\n"
		"3:\n"
		".section .fixup,\"ax\"\n"
		"4:\n\t"
		"mov.l	5f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%8, %0\n\t"
		".balign 4\n"
		"5:	.long 3b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 9b,4b\n"
		".previous"
		: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d)
		: "0" (__count), "2" (__src), "3" (__dest), "r" (__count),
		  "i" (-EFAULT)
		: "memory");

	return res;
}

#define strncpy_from_user(dest,src,count) ({ \
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count); \
	long __sfu_res = -EFAULT; \
	if (__access_ok(__sfu_src, __sfu_count)) { \
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	} __sfu_res; })
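
/*
 * Illustrative: fetching a short string from user space (ubuf is a
 * hypothetical user pointer); a negative result means the address was
 * bad or the copy faulted:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, ubuf, sizeof(name));
 *	if (len < 0)
 *		return len;
 */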

#define strlen_user(str) strnlen_user(str, ~0UL >> 1)

/*
 * Return the size of a string (including the ending 0!)
 */
extern __inline__ long __strnlen_user(const char *__s, long __n)
{
	unsigned long res;
	unsigned long __dummy;

	__asm__ __volatile__(
		"9:\n"
		"cmp/eq	%4, %0\n\t"
		"bt	2f\n"
		"1:\t"
		"mov.b	@(%0,%3), %1\n\t"
		"tst	%1, %1\n\t"
		"bf/s	9b\n\t"
		" add	#1, %0\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l	4f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%5, %0\n"
		".balign 4\n"
		"4:	.long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous"
		: "=z" (res), "=&r" (__dummy)
		: "0" (0), "r" (__s), "r" (__n), "i" (-EFAULT));
	return res;
}

extern __inline__ long strnlen_user(const char *s, long n)
{
	if (!__addr_ok(s))
		return 0;
	else
		return __strnlen_user(s, n);
}
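
/*
 * Illustrative: unlike strnlen(), the result counts the terminating
 * NUL, and 0 means the pointer itself was outside the address limit
 * (s and PATH_MAX used hypothetically):
 *
 *	long n = strnlen_user(s, PATH_MAX);
 *	if (n == 0)
 *		return -EFAULT;
 */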

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup.unit otherwise.  */
extern unsigned long search_exception_table(unsigned long addr);

/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc) \
({ \
	fixup_unit; \
})
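
/*
 * Sketch of how the pieces fit together (illustrative; regs is a
 * hypothetical trap-frame pointer): on a fault inside a *_user()
 * helper the fault handler does roughly
 *
 *	fixup = search_exception_table(regs->pc);
 *	if (fixup)
 *		regs->pc = fixup_exception(0, fixup, regs->pc);
 *	else
 *		... genuine kernel fault, oops ...
 */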

#endif /* __ASM_SH_UACCESS_H */