/*
 *  linux/include/asm-arm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/system.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
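/*
 * Illustrative sketch, not part of this header: conceptually, the fault
 * handler looks up the faulting PC in the __ex_table and, when it finds
 * a matching entry, resumes execution at the recorded fixup address.
 * search_exception_tables() and instruction_pointer() live elsewhere in
 * the kernel; the snippet below only shows how an insn/fixup pair is
 * consumed.
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_exception_tables(instruction_pointer(regs));
 *	if (fixup)
 *		regs->ARM_pc = fixup->fixup;
 */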
/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000
#define USER_DS		TASK_SIZE

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs (mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;
        modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a,b)	((a) == (b))
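/*
 * Illustrative sketch, not part of this header: the conventional pattern
 * for temporarily widening the address limit so the *_user accessors also
 * accept kernel-space pointers, restoring the old limit afterwards.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... call code that uses copy_from_user()/copy_to_user() ...
 *	set_fs(old_fs);
 */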
#define __addr_ok(addr) ({ \
        unsigned long flag; \
        __asm__("cmp %2, %0; movlo %0, #0" \
                : "=&r" (flag) \
                : "0" (current_thread_info()->addr_limit), "r" (addr) \
                : "cc"); \
        (flag == 0); })

/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
        unsigned long flag, sum; \
        __chk_user_ptr(addr); \
        __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
                : "=&r" (flag), "=&r" (sum) \
                : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
                : "cc"); \
        flag; })

#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
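/*
 * Illustrative notes, not part of this header:
 *
 * "33-bit arithmetic" refers to the carry flag acting as a 33rd bit in
 * __range_ok(): if (addr + size) wraps past 2^32 the carry is set, the
 * conditional instructions are skipped, and the non-zero addr_limit is
 * returned, so e.g. addr = 0xfffff000 with size = 0x2000 is rejected
 * under USER_DS even though the truncated 32-bit sum looks small.
 *
 * access_ok() only validates the range; it is typically paired with the
 * unchecked accessors, for example (uptr being a hypothetical user-space
 * pointer):
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */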
/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
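/*
 * Illustrative sketch, not part of this header, assuming a hypothetical
 * user pointer uarg:
 *
 *	unsigned int val;
 *	int err = 0;
 *
 *	if (get_user(val, uarg))		// checked, fails with -EFAULT
 *		return -EFAULT;
 *
 *	__get_user_error(val, uarg, err);	// unchecked, accumulates in err
 *	__put_user_error(val + 1, uarg, err);
 *	if (err)
 *		return err;
 */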
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_bad(void);

#define __get_user_x(__r2,__p,__e,__s,__i...) \
        __asm__ __volatile__ ( \
                __asmeq("%0", "r0") __asmeq("%1", "r2") \
                "bl __get_user_" #__s \
                : "=&r" (__e), "=r" (__r2) \
                : "0" (__p) \
                : __i, "cc")

#define get_user(x,p) \
({ \
        const register typeof(*(p)) __user *__p asm("r0") = (p); \
        register unsigned int __r2 asm("r2"); \
        register int __e asm("r0"); \
        switch (sizeof(*(__p))) { \
        case 1: \
                __get_user_x(__r2, __p, __e, 1, "lr"); \
                break; \
        case 2: \
                __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \
                break; \
        case 4: \
                __get_user_x(__r2, __p, __e, 4, "lr"); \
                break; \
        default: __e = __get_user_bad(); break; \
        } \
        x = (typeof(*(p))) __r2; \
        __e; \
})

#define __get_user(x,ptr) \
({ \
        long __gu_err = 0; \
        __get_user_err((x),(ptr),__gu_err); \
        __gu_err; \
})

#define __get_user_error(x,ptr,err) \
({ \
        __get_user_err((x),(ptr),err); \
        (void) 0; \
})

#define __get_user_err(x,ptr,err) \
do { \
        unsigned long __gu_addr = (unsigned long)(ptr); \
        unsigned long __gu_val; \
        __chk_user_ptr(ptr); \
        switch (sizeof(*(ptr))) { \
        case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
        case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
        case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \
        default: (__gu_val) = __get_user_bad(); \
        } \
        (x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)

#define __get_user_asm_byte(x,addr,err) \
        __asm__ __volatile__( \
        "1: ldrbt %1,[%2],#0\n" \
        "2:\n" \
        " .section .fixup,\"ax\"\n" \
        " .align 2\n" \
        "3: mov %0, %3\n" \
        " mov %1, #0\n" \
        " b 2b\n" \
        " .previous\n" \
        " .section __ex_table,\"a\"\n" \
        " .align 3\n" \
        " .long 1b, 3b\n" \
        " .previous" \
        : "+r" (err), "=&r" (x) \
        : "r" (addr), "i" (-EFAULT) \
        : "cc")

#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
        unsigned long __b1, __b2; \
        __get_user_asm_byte(__b1, __gu_addr, err); \
        __get_user_asm_byte(__b2, __gu_addr + 1, err); \
        (x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
        unsigned long __b1, __b2; \
        __get_user_asm_byte(__b1, __gu_addr, err); \
        __get_user_asm_byte(__b2, __gu_addr + 1, err); \
        (x) = (__b1 << 8) | __b2; \
})
#endif

#define __get_user_asm_word(x,addr,err) \
        __asm__ __volatile__( \
        "1: ldrt %1,[%2],#0\n" \
        "2:\n" \
        " .section .fixup,\"ax\"\n" \
        " .align 2\n" \
        "3: mov %0, %3\n" \
        " mov %1, #0\n" \
        " b 2b\n" \
        " .previous\n" \
        " .section __ex_table,\"a\"\n" \
        " .align 3\n" \
        " .long 1b, 3b\n" \
        " .previous" \
        : "+r" (err), "=&r" (x) \
        : "r" (addr), "i" (-EFAULT) \
        : "cc")
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
extern int __put_user_bad(void);

#define __put_user_x(__r2,__p,__e,__s) \
        __asm__ __volatile__ ( \
                __asmeq("%0", "r0") __asmeq("%2", "r2") \
                "bl __put_user_" #__s \
                : "=&r" (__e) \
                : "0" (__p), "r" (__r2) \
                : "ip", "lr", "cc")

#define put_user(x,p) \
({ \
        const register typeof(*(p)) __r2 asm("r2") = (x); \
        const register typeof(*(p)) __user *__p asm("r0") = (p); \
        register int __e asm("r0"); \
        switch (sizeof(*(__p))) { \
        case 1: \
                __put_user_x(__r2, __p, __e, 1); \
                break; \
        case 2: \
                __put_user_x(__r2, __p, __e, 2); \
                break; \
        case 4: \
                __put_user_x(__r2, __p, __e, 4); \
                break; \
        case 8: \
                __put_user_x(__r2, __p, __e, 8); \
                break; \
        default: __e = __put_user_bad(); break; \
        } \
        __e; \
})

#define __put_user(x,ptr) \
({ \
        long __pu_err = 0; \
        __put_user_err((x),(ptr),__pu_err); \
        __pu_err; \
})

#define __put_user_error(x,ptr,err) \
({ \
        __put_user_err((x),(ptr),err); \
        (void) 0; \
})

#define __put_user_err(x,ptr,err) \
do { \
        unsigned long __pu_addr = (unsigned long)(ptr); \
        __typeof__(*(ptr)) __pu_val = (x); \
        __chk_user_ptr(ptr); \
        switch (sizeof(*(ptr))) { \
        case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
        case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
        case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \
        case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \
        default: __put_user_bad(); \
        } \
} while (0)

#define __put_user_asm_byte(x,__pu_addr,err) \
        __asm__ __volatile__( \
        "1: strbt %1,[%2],#0\n" \
        "2:\n" \
        " .section .fixup,\"ax\"\n" \
        " .align 2\n" \
        "3: mov %0, %3\n" \
        " b 2b\n" \
        " .previous\n" \
        " .section __ex_table,\"a\"\n" \
        " .align 3\n" \
        " .long 1b, 3b\n" \
        " .previous" \
        : "+r" (err) \
        : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
        : "cc")

#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
        unsigned long __temp = (unsigned long)(x); \
        __put_user_asm_byte(__temp, __pu_addr, err); \
        __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
#else
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
        unsigned long __temp = (unsigned long)(x); \
        __put_user_asm_byte(__temp >> 8, __pu_addr, err); \
        __put_user_asm_byte(__temp, __pu_addr + 1, err); \
})
#endif

#define __put_user_asm_word(x,__pu_addr,err) \
        __asm__ __volatile__( \
        "1: strt %1,[%2],#0\n" \
        "2:\n" \
        " .section .fixup,\"ax\"\n" \
        " .align 2\n" \
        "3: mov %0, %3\n" \
        " b 2b\n" \
        " .previous\n" \
        " .section __ex_table,\"a\"\n" \
        " .align 3\n" \
        " .long 1b, 3b\n" \
        " .previous" \
        : "+r" (err) \
        : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
        : "cc")
/*
 * %Q and %R select the registers holding the least and most significant
 * words of the 64-bit operand in %2; they are swapped between little-
 * and big-endian so that __put_user_asm_dword() stores the value in
 * native memory order.
 */
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
#define __reg_oper1 "%Q2"
#else
#define __reg_oper0 "%Q2"
#define __reg_oper1 "%R2"
#endif
#define __put_user_asm_dword(x,__pu_addr,err) \
        __asm__ __volatile__( \
        "1: strt " __reg_oper1 ", [%1], #4\n" \
        "2: strt " __reg_oper0 ", [%1], #0\n" \
        "3:\n" \
        " .section .fixup,\"ax\"\n" \
        " .align 2\n" \
        "4: mov %0, %3\n" \
        " b 3b\n" \
        " .previous\n" \
        " .section __ex_table,\"a\"\n" \
        " .align 3\n" \
        " .long 1b, 4b\n" \
        " .long 2b, 4b\n" \
        " .previous" \
        : "+r" (err), "+r" (__pu_addr) \
        : "r" (x), "i" (-EFAULT) \
        : "cc")
extern unsigned long __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __arch_clear_user(void __user *addr, unsigned long n);
extern unsigned long __arch_strncpy_from_user(char *to, const char __user *from, unsigned long count);
extern unsigned long __arch_strnlen_user(const char __user *s, long n);

static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                n = __arch_copy_from_user(to, from, n);
        else /* security hole - plug it */
                memzero(to, n);
        return n;
}
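/*
 * Illustrative sketch, not part of this header: callers conventionally
 * turn the "bytes not copied" return value into -EFAULT, e.g. with a
 * hypothetical structure foo and user pointer uarg:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */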
static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
        return __arch_copy_from_user(to, from, n);
}

static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __arch_copy_to_user(to, from, n);
        return n;
}

static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
        return __arch_copy_to_user(to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long clear_user (void __user *to, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __arch_clear_user(to, n);
        return n;
}

static inline unsigned long __clear_user (void __user *to, unsigned long n)
{
        return __arch_clear_user(to, n);
}

static inline long strncpy_from_user (char *dst, const char __user *src, long count)
{
        long res = -EFAULT;
        if (access_ok(VERIFY_READ, src, 1))
                res = __arch_strncpy_from_user(dst, src, count);
        return res;
}

static inline long __strncpy_from_user (char *dst, const char __user *src, long count)
{
        return __arch_strncpy_from_user(dst, src, count);
}

#define strlen_user(s) strnlen_user(s, ~0UL >> 1)

static inline long strnlen_user(const char __user *s, long n)
{
        unsigned long res = 0;

        if (__addr_ok(s))
                res = __arch_strnlen_user(s, n);

        return res;
}

#endif /* _ASMARM_UACCESS_H */