#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) &		\
			   (current_thread_info()->addr_limit.seg)))

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr, size)				\
({								\
	unsigned long flag, roksum;				\
	__chk_user_ptr(addr);					\
	asm("# range_ok\n\r"					\
	    "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"	\
	    : "=&r" (flag), "=r" (roksum)			\
	    : "1" (addr), "g" ((long)(size)),			\
	      "g" (current_thread_info()->addr_limit.seg));	\
	flag;							\
})

#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)

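/*
 * Typical use (illustrative only, not part of this header): validate a
 * user range once with access_ok(), then use the unchecked accessors:
 *
 *	if (!access_ok(VERIFY_READ, uaddr, sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(val, (u32 __user *)uaddr))
 *		return -EFAULT;
 */
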
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

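/*
 * Rough sketch of how this is consumed (the real logic lives in the
 * fault handler and extable code, not in this header): on a fault at a
 * whitelisted instruction, the handler looks up the faulting IP in the
 * exception table and resumes at the fixup address, e.g.
 *
 *	const struct exception_table_entry *e;
 *	e = search_exception_tables(regs->ip);
 *	if (e)
 *		regs->ip = e->fixup;	continue in the .fixup stub
 */
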
#define ARCH_HAS_SEARCH_EXTABLE

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size, ret, x, ptr)		\
	asm volatile("call __get_user_" #size	\
		     : "=a" (ret), "=d" (x)	\
		     : "c" (ptr)		\
		     : "r8")

/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

#define get_user(x, ptr)						\
({									\
	unsigned long __val_gu;						\
	int __ret_gu;							\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 2:								\
		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 4:								\
		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 8:								\
		__get_user_x(8, __ret_gu, __val_gu, ptr);		\
		break;							\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	(x) = (__force typeof(*(ptr)))__val_gu;				\
	__ret_gu;							\
})

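/*
 * Example (illustrative only): get_user() checks the address itself
 * and returns 0 or -EFAULT; the fetched value is only valid on 0:
 *
 *	u32 val;
 *	if (get_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 */
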
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)		\
	asm volatile("call __put_user_" #size	\
		     : "=a" (ret)		\
		     : "c" (ptr), "d" (x)	\
		     : "r8")

#define put_user(x, ptr)						\
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

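/*
 * Example (illustrative only): put_user() checks the address itself;
 * __put_user() skips the check and is only safe after access_ok():
 *
 *	if (put_user(status, (int __user *)arg))
 *		return -EFAULT;
 */
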
#define __put_user_nocheck(x, ptr, size)		\
({							\
	int __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)				\
({								\
	int __pu_err;						\
	typeof(*(ptr)) __user *__pu_addr = (ptr);		\
	switch (size) {						\
	case 1:							\
		__put_user_x(1, __pu_err, x, __pu_addr);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_err, x, __pu_addr);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_err, x, __pu_addr);	\
		break;						\
	case 8:							\
		__put_user_x(8, __pu_err, x, __pu_addr);	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
		break;							\
	case 8:								\
		__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT);\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

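/*
 * The oversized-struct cast lets the "m" constraints below refer to
 * user memory without gcc drawing conclusions from the real object's
 * size; the 100-long bound is arbitrary, hence the FIXME above.
 */
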
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))

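/*
 * Operand map for the asm above: %0 = err, %1 = x, %2 = user memory at
 * addr, %3 = errno.  Label 1: is the store that may fault; the extable
 * entry redirects a fault to 3: in .fixup, which loads errno into err
 * and jumps back to the normal path at 2:.
 */
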
#define __get_user_nocheck(x, ptr, size)			\
({								\
	int __gu_err;						\
	unsigned long __gu_val;					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force typeof(*(ptr)))__gu_val;			\
	__gu_err;						\
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
		break;							\
	case 8:								\
		__get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT);	\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype (x)				\
		     : "m" (__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

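/*
 * Example (illustrative only): pulling a buffer in from user space.
 * copy_from_user() returns the number of bytes that could NOT be
 * copied, so any non-zero result means a fault:
 *
 *	char kbuf[64];
 *	if (len > sizeof(kbuf))
 *		return -EINVAL;
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */
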
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

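/*
 * Note on the switch above: when gcc can prove the size is a constant
 * 1/2/4/8/10/16, the copy collapses to one or two inlined mov
 * instructions; everything else goes out of line to
 * copy_user_generic().
 */
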
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "ir", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 10);
		if (unlikely(ret))
			return ret;
		asm("" : : : "memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 16);
		if (unlikely(ret))
			return ret;
		asm("" : : : "memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "ir", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

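/*
 * The bare asm("" : : : "memory") in the 10- and 16-byte cases above
 * acts as a compiler barrier between the two stores: __put_user_asm
 * does not clobber "memory", so the barrier keeps gcc from moving
 * memory accesses across the first store and its error check.
 */
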
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "ir", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

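/*
 * Note: for small constant sizes, user-to-user copies bounce through a
 * kernel temporary (tmp above), pairing one __get_user_asm with one
 * __put_user_asm so that a fault on either side is caught on its own.
 */
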
__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

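/*
 * Example (illustrative only): strncpy_from_user() returns the length
 * of the copied string (not counting the trailing NUL), count if the
 * source did not fit, or -EFAULT on a bad address:
 *
 *	char name[32];
 *	long n = strncpy_from_user(name, uname, sizeof(name) - 1);
 *	if (n < 0)
 *		return n;
 *	name[n] = '\0';
 */
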
__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
					    unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

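/*
 * In __copy_user_nocache() above, a non-zero zerorest asks the copy to
 * zero the remaining destination bytes when a fault cuts it short
 * (matching copy_from_user() semantics); the inatomic caller below
 * passes 0 and leaves the tail untouched.
 */
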
static inline int __copy_from_user_nocache(void *dst, const void __user *src,
					   unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
						    const void __user *src,
						    unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

#endif /* __X86_64_UACCESS_H */