/*
 * User space memory access functions
 *
 * Copyright (C) 1999 Niibe Yutaka
 *
 * Based on:
 *     MIPS implementation version 1.15 by
 *     Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *     and i386 version.
 */
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ  0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(0x80000000)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit=(x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr)	((unsigned long)(addr) < (current->addr_limit.seg))

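/*
 * Illustrative sketch (not part of the original header): the classic
 * pattern for temporarily widening the limit so kernel pointers pass
 * the checks, e.g. when feeding kernel buffers to code that expects
 * user pointers:
 *
 *	mm_segment_t oldfs = get_fs();
 *	set_fs(KERNEL_DS);
 *	... call user-access routines on kernel data ...
 *	set_fs(oldfs);
 */
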
/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 * sum := addr + size;  carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 */
#define __range_ok(addr,size) ({ \
	unsigned long flag,sum; \
	__asm__("clrt; addc %3,%1; movt %0; cmp/hi %4,%1; rotcl %0" \
		:"=&r" (flag), "=r" (sum) \
		:"1" (addr), "r" (size), "r" (current->addr_limit.seg)); \
	flag; })

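/*
 * Worked example (editor's illustration): with addr_limit.seg ==
 * USER_DS == 0x80000000, addr = 0x7FFFFFFF and size = 2 give
 * sum = 0x80000001 with no carry, but cmp/hi sees sum above the limit,
 * so flag ends up nonzero; addr = 0xFFFFFFFF and size = 2 wrap to
 * sum = 0x00000001 with a carry, which rotcl shifts into flag.
 * Either way __range_ok() reports failure and access_ok() is 0.
 */
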
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
#define __access_ok(addr,size) (__range_ok(addr,size) == 0)

extern inline int verify_area(int type, const void *addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}

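/*
 * Illustrative sketch: a driver ioctl() handler would typically
 * validate the user pointer once up front ("arg" is hypothetical):
 *
 *	if (verify_area(VERIFY_WRITE, (void *) arg, sizeof(int)))
 *		return -EFAULT;
 */
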
/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr)	__get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))

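/*
 * Illustrative sketch: one checked fetch, or several unchecked ones
 * after a single access_ok() (uptr, a and b are hypothetical):
 *
 *	int a, b;
 *	if (get_user(a, uptr))
 *		return -EFAULT;
 *
 *	if (access_ok(VERIFY_READ, uptr, 2 * sizeof(int))) {
 *		__get_user(a, uptr);
 *		__get_user(b, uptr + 1);
 *	}
 */
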
86 * The "xxx_ret" versions return constant specified in third argument, if
87 * something bad happens. These macros can be optimized for the
88 * case of just returning from the function xxx_ret is used.
91 #define put_user_ret(x,ptr,ret) ({ \
92 if (put_user(x,ptr)) return ret; })
94 #define get_user_ret(x,ptr,ret) ({ \
95 if (get_user(x,ptr)) return ret; })
97 #define __put_user_ret(x,ptr,ret) ({ \
98 if (__put_user(x,ptr)) return ret; })
100 #define __get_user_ret(x,ptr,ret) ({ \
101 if (__get_user(x,ptr)) return ret; })
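/*
 * Illustrative sketch: because the "_ret" forms execute "return" in the
 * caller, they may only appear directly inside the function that should
 * bail out (sys_example and uptr are hypothetical):
 *
 *	asmlinkage int sys_example(int *uptr)
 *	{
 *		put_user_ret(0, uptr, -EFAULT);
 *		return 0;
 *	}
 */
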
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

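/*
 * Editor's note: casting the address to a pointer at a large dummy
 * struct, as __m() does, is the usual uaccess idiom (also seen in the
 * i386 and MIPS versions) for telling GCC that the "m" operand in the
 * asm below may reference a sizeable region, not just a single word.
 */
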
#define __get_user_nocheck(x,ptr,size) ({ \
	long __gu_err; \
	__typeof__(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); /* mark "initialized" to quiet gcc */ \
	__gu_addr = (long) (ptr); \
	__asm__("":"=r" (__gu_err)); \
	switch (size) { \
	case 1: __get_user_asm("b"); break; \
	case 2: __get_user_asm("w"); break; \
	case 4: __get_user_asm("l"); break; \
	default: __get_user_unknown(); break; \
	} x = (__typeof__(*(ptr))) __gu_val; __gu_err; })

#define __get_user_check(x,ptr,size) ({ \
	long __gu_err = -EFAULT; /* default if the __access_ok() check fails */ \
	__typeof__(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); \
	__gu_addr = (long) (ptr); \
	if (__access_ok(__gu_addr,size)) { \
	switch (size) { \
	case 1: __get_user_asm("b"); break; \
	case 2: __get_user_asm("w"); break; \
	case 4: __get_user_asm("l"); break; \
	default: __get_user_unknown(); break; \
	} } x = (__typeof__(*(ptr))) __gu_val; __gu_err; })

#define __get_user_asm(insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn " %2,%1\n\t" \
	"mov #0,%0\n" \
	"2:\n\t" \
	".section .fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov #0,%1\n\t" \
	"mov.l 4f,%0\n\t" \
	"jmp @%0\n\t" \
	" mov %3,%0\n" \
	"4: .long 2b\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".long 1b, 3b\n\t" \
	".previous" \
	:"=&r" (__gu_err), "=&r" (__gu_val) \
	:"m" (__m(__gu_addr)), "i" (-EFAULT)); })

extern void __get_user_unknown(void);

#define __put_user_nocheck(x,ptr,size) ({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	__asm__("":"=r" (__pu_err)); \
	switch (size) { \
	case 1: __put_user_asm("b"); break; \
	case 2: __put_user_asm("w"); break; \
	case 4: __put_user_asm("l"); break; \
	default: __put_user_unknown(); break; \
	} __pu_err; })

#define __put_user_check(x,ptr,size) ({ \
	long __pu_err = -EFAULT; /* default if the __access_ok() check fails */ \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	if (__access_ok(__pu_addr,size)) { \
	switch (size) { \
	case 1: __put_user_asm("b"); break; \
	case 2: __put_user_asm("w"); break; \
	case 4: __put_user_asm("l"); break; \
	default: __put_user_unknown(); break; \
	} } __pu_err; })

#define __put_user_asm(insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn " %1,%2\n\t" \
	"mov #0,%0\n" \
	"2:\n\t" \
	".section .fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l 4f,%0\n\t" \
	"jmp @%0\n\t" \
	"mov %3,%0\n" \
	"4: .long 2b\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".long 1b, 3b\n\t" \
	".previous" \
	:"=&r" (__pu_err) \
	:"r" (__pu_val), "m" (__m(__pu_addr)), "i" (-EFAULT)); })

extern void __put_user_unknown(void);

/* Generic arbitrary sized copy. */
/* XXX: should be split up: a 4-byte copy loop, then the byte remainder. */
extern __inline__ __kernel_size_t
__copy_user(void *__to, const void *__from, __kernel_size_t __n)
{
	unsigned long __dummy, _f, _t;
	__kernel_size_t res;

	__asm__ __volatile__(
		"9:\n\t"
		"mov.b @%2+,%1\n\t"
		"dt %0\n"
		"1:\n\t"
		"mov.b %1,@%3\n\t"
		"bf/s 9b\n\t"
		" add #1,%3\n"
		"2:"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l 5f,%1\n\t"
		"jmp @%1\n\t"
		" mov %7,%0\n\t"
		".align 4\n"
		"5: .long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 9b,3b\n"
		" .long 1b,2b\n"
		".previous"
		: "=&r" (res), "=&z" (__dummy), "=&r" (_f), "=&r" (_t)
		: "2" (__from), "3" (__to), "0" (__n), "i" (-EFAULT)
		: "memory");

	return res;
}

#define copy_to_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })

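/*
 * Illustrative sketch: a typical read() handler copying a kernel buffer
 * out to user space (buf, kbuf and count are hypothetical); a nonzero
 * return is the number of bytes that could not be copied:
 *
 *	if (copy_to_user(buf, kbuf, count))
 *		return -EFAULT;
 *	return count;
 */
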
#define copy_to_user_ret(to,from,n,retval) ({ \
	if (copy_to_user(to,from,n)) \
		return retval; \
})

#define __copy_to_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

#define __copy_to_user_ret(to,from,n,retval) ({ \
	if (__copy_to_user(to,from,n)) \
		return retval; \
})

#define copy_from_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	void *__copy_from = (void *) (from); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })

#define copy_from_user_ret(to,from,n,retval) ({ \
	if (copy_from_user(to,from,n)) \
		return retval; \
})

#define __copy_from_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

#define __copy_from_user_ret(to,from,n,retval) ({ \
	if (__copy_from_user(to,from,n)) \
		return retval; \
})

/* XXX: Not sure it works well..
   should be split up: a 4-byte clear loop, then the byte remainder. */
extern __inline__ __kernel_size_t
__clear_user(void *addr, __kernel_size_t size)
{
	__kernel_size_t res;
	unsigned long __a, __s;

	__asm__ __volatile__(
		"9:\n\t"
		"dt %2\n"
		"1:\n\t"
		"mov.b %5,@%1\n\t"
		"bf/s 9b\n\t"
		" add #1,%1\n\t"
		"sub %2,%0\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l 4f,%0\n\t"
		"jmp @%0\n\t"
		" mov %7,%0\n"
		".align 4\n"
		"4: .long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 1b,3b\n"
		".previous"
		: "=&r" (res), "=&r" (__a), "=&r" (__s)
		: "1" (addr), "2" (size), "r" (0), "0" (size), "i" (-EFAULT));

	return res;
}

#define clear_user(addr,n) ({ \
	void *__cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })

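/*
 * Illustrative sketch: zero-filling the user-visible tail after a
 * partial copy (buf, copied and count are hypothetical); the return
 * value is the number of bytes left uncleared:
 *
 *	if (clear_user(buf + copied, count - copied))
 *		return -EFAULT;
 */
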
extern __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __src, int __count)
{
	__kernel_size_t res;
	unsigned long __dummy, _d, _s;

	__asm__ __volatile__(
		"9:\n"
		"mov.b @%2+,%1\n\t"
		"cmp/eq #0,%1\n\t"
		"bt/s 2f\n"
		"1:\n"
		"mov.b %1,@%3\n\t"
		"dt %0\n\t"
		"bf/s 9b\n\t"
		" add #1,%3\n\t"
		"sub %6,%0\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l 4f,%1\n\t"
		"jmp @%1\n\t"
		" mov %8,%0\n\t"
		".align 4\n"
		"4: .long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 9b,3b\n"
		" .long 1b,2b\n"
		".previous"
		: "=&r" (res), "=&z" (__dummy), "=&r" (_s), "=&r" (_d)
		: "2" (__src), "3" (__dest), "r" (__count), "0" (__count),
		  "i" (-EFAULT)
		: "memory");

	return res;
}

#define strncpy_from_user(dest,src,count) ({ \
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count); \
	long __sfu_res = -EFAULT; \
	if (__access_ok(__sfu_src, __sfu_count)) { \
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	} __sfu_res; })

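/*
 * Illustrative sketch: fetching a user-supplied name into a fixed
 * kernel buffer (uname is hypothetical); a negative result (-EFAULT)
 * indicates an inaccessible source:
 *
 *	char name[32];
 *	long res = strncpy_from_user(name, uname, sizeof(name));
 *	if (res < 0)
 *		return res;
 */
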
/*
 * Return the size of a string (including the ending 0!)
 */
extern __inline__ long __strlen_user(const char *__s)
{
	unsigned long res;
	unsigned long __dummy;

	__asm__ __volatile__(
		"mov #-1,%1\n"
		"9:\n"
		"cmp/eq #0,%1\n\t"
		"bf/s 9b\n\t"
		"1:\t"
		" mov.b @%0+,%1\n\t"
		"sub %3,%0\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l 4f,%1\n\t"
		"jmp @%1\n\t"
		" mov %4,%0\n"
		".align 4\n"
		"4: .long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 1b,3b\n"
		".previous"
		: "=&r" (res), "=&z" (__dummy)
		: "0" (__s), "r" (__s), "i" (-EFAULT));
	return res;
}

extern __inline__ long strlen_user(const char *s)
{
	if (!access_ok(VERIFY_READ, s, 0))
		return 0;
	else
		return __strlen_user(s);
}

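/*
 * Illustrative sketch: sizing a user string before copying it (ustr is
 * hypothetical); the count includes the terminating 0, and 0 means the
 * pointer failed the access_ok() check:
 *
 *	long len = strlen_user(ustr);
 *	if (len <= 0)
 *		return -EFAULT;
 */
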
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup.unit otherwise.  */
extern unsigned long search_exception_table(unsigned long addr);

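/*
 * Illustrative sketch: the architecture's page-fault handler consults
 * this table to recover from a faulting user access, along the lines of
 * (regs is the hypothetical pt_regs of the faulting context):
 *
 *	unsigned long fixup = search_exception_table(regs->pc);
 *	if (fixup) {
 *		regs->pc = fixup;
 *		return;
 *	}
 */
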
/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc) \
({ \
	fixup_unit; \
})

#endif /* __ASM_SH_UACCESS_H */