#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))
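
/*
 * Illustrative sketch (not part of this header): kernel code that must
 * feed a kernel buffer through a path checked against USER_DS can
 * temporarily widen addr_limit.  "do_uaccess_path" is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);			// bypass the USER_DS range check
 *	err = do_uaccess_path(kbuf, len);	// hypothetical callee
 *	set_fs(old_fs);				// always restore the old limit
 */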
/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr,size) ({					\
	unsigned long flag,sum;						\
	__chk_user_ptr(addr);						\
	asm("# range_ok\n\r"						\
		"addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"	\
		:"=&r" (flag), "=r" (sum)				\
		:"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
	flag; })

#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
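
/*
 * Illustrative sketch: validate a user range once with access_ok(),
 * then use the unchecked __get_user()/__put_user() helpers declared
 * below for repeated accesses.  "uptr", "val" and "n" are hypothetical.
 *
 *	if (!access_ok(VERIFY_READ, uptr, n * sizeof(u32)))
 *		return -EFAULT;
 *	while (n--)
 *		if (__get_user(val, uptr++))
 *			return -EFAULT;
 */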
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

#define ARCH_HAS_SEARCH_EXTABLE
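
/*
 * Sketch of how the pairs are generated (the real pattern appears in
 * __get_user_asm/__put_user_asm below): label 1 marks the instruction
 * allowed to fault, label 3 the fixup code to resume at.
 *
 *	1:	movq (%rbx),%rax
 *	...
 *	.section __ex_table,"a"
 *		.align 8
 *		.quad 1b,3b		// one {insn, fixup} entry
 *	.previous
 */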
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size,ret,x,ptr)		\
	asm volatile("call __get_user_" #size	\
		:"=a" (ret),"=d" (x)		\
		:"c" (ptr)			\
		:"r8")
/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr)							\
({	unsigned long __val_gu;						\
	int __ret_gu;							\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;		\
	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;		\
	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;		\
	case 8:  __get_user_x(8,__ret_gu,__val_gu,ptr); break;		\
	default: __get_user_bad(); break;				\
	}								\
	(x) = (typeof(*(ptr)))__val_gu;					\
	__ret_gu;							\
})
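
/*
 * Illustrative sketch: get_user() evaluates to 0 on success or -EFAULT
 * on a fault, and stores the fetched value through its first argument.
 * "uptr" is a hypothetical int __user pointer.
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */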
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr)		\
	asm volatile("call __put_user_" #size	\
		:"=a" (ret)			\
		:"c" (ptr),"d" (x)		\
		:"r8")

#define put_user(x,ptr)							\
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
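
/*
 * Illustrative sketch, the mirror image of get_user().  "uptr" is a
 * hypothetical int __user pointer.
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */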
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
#define __put_user_nocheck(x,ptr,size)				\
({								\
	int __pu_err;						\
	__put_user_size((x),(ptr),(size),__pu_err);		\
	__pu_err;						\
})

#define __put_user_check(x,ptr,size)				\
({								\
	int __pu_err;						\
	typeof(*(ptr)) __user *__pu_addr = (ptr);		\
	switch (size) {						\
	case 1: __put_user_x(1,__pu_err,x,__pu_addr); break;	\
	case 2: __put_user_x(2,__pu_err,x,__pu_addr); break;	\
	case 4: __put_user_x(4,__pu_err,x,__pu_addr); break;	\
	case 8: __put_user_x(8,__pu_err,x,__pu_addr); break;	\
	default: __put_user_bad();				\
	}							\
	__pu_err;						\
})
#define __put_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
	case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
	case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\
	default: __put_user_bad();					\
	}								\
} while (0)
/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile(							\
		"1:	mov"itype" %"rtype"1,%2\n"			\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	mov %3,%0\n"					\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 8\n"					\
		"	.quad 1b,3b\n"					\
		".previous"						\
		: "=r"(err)						\
		: ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
#define __get_user_nocheck(x,ptr,size)				\
({								\
	int __gu_err;						\
	unsigned long __gu_val;					\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
	(x) = (typeof(*(ptr)))__gu_val;				\
	__gu_err;						\
})
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
#define __get_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
	case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
	case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile(							\
		"1:	mov"itype" %2,%"rtype"1\n"			\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	mov %3,%0\n"					\
		"	xor"itype" %"rtype"1,%"rtype"1\n"		\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 8\n"					\
		"	.quad 1b,3b\n"					\
		".previous"						\
		: "=r"(err), ltype (x)					\
		: "m"(__m(addr)), "i"(errno), "0"(err))
/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
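
/*
 * Illustrative sketch: unlike get_user()/put_user(), these return the
 * number of bytes that could NOT be copied (0 means full success), not
 * an errno.  "ubuf" and struct foo are hypothetical.
 *
 *	struct foo karg;
 *	if (copy_from_user(&karg, ubuf, sizeof(karg)))
 *		return -EFAULT;
 */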
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1: __get_user_asm(*(u8 *)dst, (u8 __user *)src, ret, "b", "b", "=q", 1);
		return ret;
	case 2: __get_user_asm(*(u16 *)dst, (u16 __user *)src, ret, "w", "w", "=r", 2);
		return ret;
	case 4: __get_user_asm(*(u32 *)dst, (u32 __user *)src, ret, "l", "k", "=r", 4);
		return ret;
	case 8: __get_user_asm(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 8);
		return ret;
	case 10:	/* 8-byte chunk plus a 2-byte tail */
		__get_user_asm(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst), (u16 __user *)(8 + (char __user *)src), ret, "w", "w", "=r", 2);
		return ret;
	case 16:	/* two 8-byte chunks */
		__get_user_asm(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst), (u64 __user *)(8 + (char __user *)src), ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
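
/*
 * Illustrative sketch: because the switch above needs a compile-time
 * constant, a constant-sized call inlines to a single __get_user_asm()
 * instead of calling copy_user_generic().  "uptr" is hypothetical.
 *
 *	u32 v;
 *	if (__copy_from_user(&v, uptr, sizeof(v)))	// constant size 4
 *		return -EFAULT;
 */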
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1: __put_user_asm(*(u8 *)src, (u8 __user *)dst, ret, "b", "b", "iq", 1);
		return ret;
	case 2: __put_user_asm(*(u16 *)src, (u16 __user *)dst, ret, "w", "w", "ir", 2);
		return ret;
	case 4: __put_user_asm(*(u32 *)src, (u32 __user *)dst, ret, "l", "k", "ir", 4);
		return ret;
	case 8: __put_user_asm(*(u64 *)src, (u64 __user *)dst, ret, "q", "", "ir", 8);
		return ret;
	case 10:	/* 8-byte chunk plus a 2-byte tail */
		__put_user_asm(*(u64 *)src, (u64 __user *)dst, ret, "q", "", "ir", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");	/* compiler barrier between the two stores */
		__put_user_asm(((u16 *)src)[4], (u16 __user *)dst + 4, ret, "w", "w", "ir", 2);
		return ret;
	case 16:	/* two 8-byte chunks */
		__put_user_asm(*(u64 *)src, (u64 __user *)dst, ret, "q", "", "ir", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");	/* compiler barrier between the two stores */
		__put_user_asm(((u64 *)src)[1], (u64 __user *)dst + 1, ret, "q", "", "ir", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src, ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst, ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src, ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst, ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src, ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst, ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src, ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst, ret, "q", "", "ir", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst, (__force void *)src, size);
	}
}
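
/*
 * Illustrative sketch: __copy_in_user() moves data between two user
 * buffers through a register, with no kernel bounce buffer; both
 * pointers must already have passed access_ok().  Names hypothetical.
 *
 *	if (__copy_in_user(udst, usrc, 8))	// constant size: inlined
 *		return -EFAULT;
 */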
__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
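
/*
 * Illustrative sketch: strncpy_from_user() returns the length of the
 * copied string (excluding the NUL) or -EFAULT; clear_user() returns
 * the number of bytes left unzeroed.  "uname" is a hypothetical
 * char __user pointer.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 */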
__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
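
/*
 * Illustrative sketch: the _inatomic variants are intended for callers
 * that cannot sleep, e.g. while an atomic kmap is held.  A hedged
 * example, names per the kmap_atomic() API of this era:
 *
 *	char *vto = kmap_atomic(page, KM_USER0);
 *	left = __copy_from_user_inatomic(vto, src, len);
 *	kunmap_atomic(vto, KM_USER0);
 */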
#endif /* __X86_64_UACCESS_H */