x86: some lock annotations for user copy paths, v2
include/asm-x86/uaccess_64.h
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
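
/*
 * Editor's note: all copy routines here return the number of bytes that
 * could NOT be copied, so 0 means success.  The double-underscore
 * variants below skip the access_ok() range check; the caller must have
 * validated the user pointer already.  Typical caller pattern (an
 * illustrative sketch only; "struct foo" and "arg" are hypothetical):
 *
 *	struct foo kbuf;
 *
 *	if (copy_from_user(&kbuf, (void __user *)arg, sizeof(kbuf)))
 *		return -EFAULT;
 */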

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		/*
		 * Split copy: the last __get_user_asm argument is the
		 * error return, i.e. how many bytes remain uncopied if
		 * that access faults (10 here, not 16: only 10 bytes
		 * were requested).
		 */
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
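
/*
 * __copy_to_user(): same convention as __copy_from_user() above; no
 * access_ok() check, returns the number of bytes left uncopied.  The
 * asm("":::"memory") statements below are pure compiler barriers,
 * keeping the compiler from reordering accesses across the two halves
 * of the split copies.
 */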
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "ir", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		/* 4[(u16 *)src] == ((u16 *)src)[4]: the u16 at byte offset 8 */
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		/* 1[(u64 *)src] == ((u64 *)src)[1]: the u64 at byte offset 8 */
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "ir", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
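
/*
 * __copy_in_user(): user-to-user copy, implemented for the small
 * constant sizes as a get into a kernel temporary followed by a put;
 * neither pointer is checked with access_ok() here.
 */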
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "ir", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
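
/*
 * String handling and user-memory clearing helpers; again, the
 * double-underscore variants skip the access_ok() check.
 */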
__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
					    unsigned size);
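
/*
 * The *_inatomic variants may be called with pagefaults disabled (for
 * instance under kmap_atomic()); they must not sleep, which is why they
 * carry no might_fault()/might_sleep() annotation.
 */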
static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
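
/*
 * Non-temporal ("nocache") copies minimize cache pollution when moving
 * data the kernel will not touch again soon.  A nonzero "zerorest" asks
 * for the remainder of the destination to be zeroed if the copy faults
 * partway through.
 */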
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
					   unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
						    const void __user *src,
						    unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
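
/*
 * Common fixup path for the assembly copy routines: finish a faulting
 * copy byte by byte and return how many bytes remained uncopied.
 */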
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* __X86_64_UACCESS_H */