Linux-2.6.12-rc2
include/asm-x86_64/uaccess.h
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))
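
/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * classic 2.6-era pattern for letting a kernel pointer pass the checks
 * below widens addr_limit around the access and always restores the old
 * value; do_uaccess_on() stands in for any routine that performs user
 * accesses on the buffer:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = do_uaccess_on(kernel_buf, len);
 *	set_fs(old_fs);
 */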

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr,size) ({					\
	unsigned long flag,sum;						\
	__chk_user_ptr(addr);						\
	asm("# range_ok\n\r"						\
		"addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"	\
		:"=&r" (flag), "=r" (sum)				\
		:"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
	flag; })

#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)

/* this function will go away soon - use access_ok() instead */
extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
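
/*
 * Usage sketch (hypothetical function): check the whole range once with
 * access_ok(), then use the non-checking __get_user()/__put_user()
 * variants defined below for the individual accesses:
 *
 *	int read_pair(int __user *uptr, int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
 *			return -EFAULT;
 *		if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */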

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

#define ARCH_HAS_SEARCH_EXTABLE
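
/*
 * Sketch of how the table is consumed (simplified, not this kernel's
 * exact fault path): when an instruction whose address was recorded in
 * insn faults, the page fault handler looks the address up and, if an
 * entry exists, resumes execution in the .fixup stub:
 *
 *	const struct exception_table_entry *e;
 *
 *	e = search_exception_tables(regs->rip);
 *	if (e)
 *		regs->rip = e->fixup;
 */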

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size,ret,x,ptr)					\
	__asm__ __volatile__("call __get_user_" #size			\
		:"=a" (ret),"=d" (x)					\
		:"c" (ptr)						\
		:"r8")

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr)							\
({	unsigned long __val_gu;						\
	int __ret_gu;							\
	__chk_user_ptr(ptr);						\
	switch(sizeof (*(ptr))) {					\
	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;		\
	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;		\
	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;		\
	case 8:  __get_user_x(8,__ret_gu,__val_gu,ptr); break;		\
	default: __get_user_bad(); break;				\
	}								\
	(x) = (__typeof__(*(ptr)))__val_gu;				\
	__ret_gu;							\
})

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr)					\
	__asm__ __volatile__("call __put_user_" #size			\
		:"=a" (ret)						\
		:"c" (ptr),"d" (x)					\
		:"r8")

#define put_user(x,ptr)							\
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
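
/*
 * Usage sketch (hypothetical ioctl-style handler): both macros return 0
 * on success and -EFAULT on a faulting access, and they take the
 * transfer size from the pointer type:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *)arg))
 *		return -EFAULT;
 */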

#define __put_user_nocheck(x,ptr,size)				\
({								\
	int __pu_err;						\
	__put_user_size((x),(ptr),(size),__pu_err);		\
	__pu_err;						\
})


#define __put_user_check(x,ptr,size)				\
({								\
	int __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	switch (size) {						\
	case 1: __put_user_x(1,__pu_err,x,__pu_addr); break;	\
	case 2: __put_user_x(2,__pu_err,x,__pu_addr); break;	\
	case 4: __put_user_x(4,__pu_err,x,__pu_addr); break;	\
	case 8: __put_user_x(8,__pu_err,x,__pu_addr); break;	\
	default: __put_user_bad();				\
	}							\
	__pu_err;						\
})

#define __put_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
	case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
	case 8: __put_user_asm(x,ptr,retval,"q","","ir",-EFAULT); break;\
	default: __put_user_bad();					\
	}								\
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	__asm__ __volatile__(						\
		"1:	mov"itype" %"rtype"1,%2\n"			\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	mov %3,%0\n"					\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 8\n"					\
		"	.quad 1b,3b\n"					\
		".previous"						\
		: "=r"(err)						\
		: ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))

#define __get_user_nocheck(x,ptr,size)				\
({								\
	int __gu_err;						\
	unsigned long __gu_val;					\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
	case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
	case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	__asm__ __volatile__(						\
		"1:	mov"itype" %2,%"rtype"1\n"			\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	mov %3,%0\n"					\
		"	xor"itype" %"rtype"1,%"rtype"1\n"		\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 8\n"					\
		"	.quad 1b,3b\n"					\
		".previous"						\
		: "=r"(err), ltype (x)					\
		: "m"(__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
extern unsigned long copy_user_generic(void *to, const void *from, unsigned len);

extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len);
extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
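
/*
 * Usage sketch (hypothetical caller): unlike get_user()/put_user(),
 * these return the number of bytes that could NOT be copied, so zero
 * means success:
 *
 *	struct request req;
 *
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;
 */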

static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst,(__force void *)src,size);
	switch (size) {
	case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
		return ret;
	case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
		return ret;
	case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
		return ret;
	case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
		return ret;
	case 10:
		__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
		if (unlikely(ret)) return ret;
		__get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
		return ret;
	case 16:
		__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
		if (unlikely(ret)) return ret;
		__get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
		return ret;
	default:
		return copy_user_generic(dst,(__force void *)src,size);
	}
}
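
/*
 * With a compile-time-constant size the switch above collapses to one
 * or two inline movs. Usage sketch (hypothetical caller that has
 * already done access_ok(), as the __ variants require); the 8-byte
 * case below is fully inlined:
 *
 *	u64 v;
 *
 *	if (__copy_from_user(&v, usrc, sizeof(v)))
 *		return -EFAULT;
 */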

static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,src,size);
	switch (size) {
	case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
		return ret;
	case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
		return ret;
	case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	case 10:
		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
		if (unlikely(ret)) return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	case 16:
		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
		if (unlikely(ret)) return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst,src,size);
	}
}

static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,(__force void *)src,size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
		if (likely(!ret))
			__put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
		if (likely(!ret))
			__put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	}

	case 4: {
		u32 tmp;
		__get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
		if (likely(!ret))
			__put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
		if (likely(!ret))
			__put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,(__force void *)src,size);
	}
}

long strncpy_from_user(char *dst, const char __user *src, long count);
long __strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *str, long n);
long strlen_user(const char __user *str);
unsigned long clear_user(void __user *mem, unsigned long len);
unsigned long __clear_user(void __user *mem, unsigned long len);
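
/*
 * Usage sketch (hypothetical caller): strncpy_from_user() returns the
 * length of the string copied (or count, if the string was truncated)
 * and a negative errno on a faulting address; it does not NUL-terminate
 * on truncation, so callers usually terminate by hand:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *	if (len < 0)
 *		return len;
 *	name[len] = '\0';
 */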

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#endif /* __X86_64_UACCESS_H */