[PATCH] sparse: trivial parts of fs/* annotation
[linux-2.6/history.git] / include / asm-parisc / uaccess.h
blobf147bac9db50b9691fb6e990b379e1e98c84c9ef
1 #ifndef __PARISC_UACCESS_H
2 #define __PARISC_UACCESS_H
4 /*
5 * User space memory access functions
6 */
7 #include <linux/sched.h>
8 #include <asm/page.h>
9 #include <asm/system.h>
10 #include <asm/cache.h>
/* Access-type flags for the (no-op) access_ok()/verify_area() checks. */
12 #define VERIFY_READ 0
13 #define VERIFY_WRITE 1

/* Address-limit values: seg 0 selects kernel space, seg 1 user space. */
15 #define KERNEL_DS ((mm_segment_t){0})
16 #define USER_DS ((mm_segment_t){1})

/* True when both mm_segment_t operands name the same segment. */
18 #define segment_eq(a,b) ((a).seg == (b).seg)

20 #define get_ds() (KERNEL_DS)
/* The current address limit is kept in thread_info on this port. */
21 #define get_fs() (current_thread_info()->addr_limit)
22 #define set_fs(x) (current_thread_info()->addr_limit = (x))

/*
25 * Note that since kernel addresses are in a separate address space on
26 * parisc, we don't need to do anything for access_ok() or verify_area().
27 * We just let the page fault handler do the right thing. This also means
28 * that put_user is the same as __put_user, etc.
 */

/*
 * Link-error sentinels: an access with an unsupported size expands to a
 * reference to one of these undefined symbols, so the mistake is caught
 * at link time rather than silently miscompiling.
 */
31 extern int __get_kernel_bad(void);
32 extern int __get_user_bad(void);
33 extern int __put_kernel_bad(void);
34 extern int __put_user_bad(void);

/* Per the note above, no runtime checks are needed: always succeed. */
36 #define access_ok(type,addr,size) (1)
37 #define verify_area(type,addr,size) (0)

/* Checked and unchecked accessors are identical on parisc. */
39 #define put_user __put_user
40 #define get_user __get_user
/*
 * 8-byte access helpers, selected by word size.  A 32-bit kernel has no
 * single-instruction 64-bit load, so LDD_* expand to the link-error
 * sentinels, while 64-bit stores are emulated with two 4-byte stores
 * (__put_*_asm64 below).  A 64-bit kernel uses the native ldd/std forms.
 */
42 #if BITS_PER_LONG == 32
43 #define LDD_KERNEL(ptr) __get_kernel_bad();
44 #define LDD_USER(ptr) __get_user_bad();
45 #define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr)
46 #define STD_USER(x, ptr) __put_user_asm64(x,ptr)
47 #else
48 #define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr)
49 #define LDD_USER(ptr) __get_user_asm("ldd",ptr)
50 #define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr)
51 #define STD_USER(x, ptr) __put_user_asm("std",x,ptr)
52 #endif
/*
55 * The exception table contains two values: the first is an address
56 * for an instruction that is allowed to fault, and the second is
57 * the number of bytes to skip if a fault occurs. We also support in
58 * two bit flags: 0x2 tells the exception handler to clear register
59 * r9 and 0x1 tells the exception handler to put -EFAULT in r8.
60 * This allows us to handle the simple cases for put_user and
61 * get_user without having to have .fixup sections.
 */
64 struct exception_table_entry {
65 unsigned long insn; /* address of insn that is allowed to fault. */
66 long skip; /* pcoq skip | r9 clear flag | r8 -EFAULT flag */
/*
 * __get_user(x,ptr): load sizeof(*(ptr)) bytes from ptr into x.
 * The error code is pinned to r8 and the loaded value to r9 so the
 * fault handler can patch them directly via the exception-table flag
 * bits documented above.  Dispatches on the current addr_limit:
 * KERNEL_DS uses plain loads, otherwise the sr3 user-space loads.
 * Evaluates to __gu_err (0 on success).
 */
69 #define __get_user(x,ptr) \
70 ({ \
71 register long __gu_err __asm__ ("r8") = 0; \
72 register long __gu_val __asm__ ("r9") = 0; \
74 if (segment_eq(get_fs(),KERNEL_DS)) { \
75 switch (sizeof(*(ptr))) { \
76 case 1: __get_kernel_asm("ldb",ptr); break; \
77 case 2: __get_kernel_asm("ldh",ptr); break; \
78 case 4: __get_kernel_asm("ldw",ptr); break; \
79 case 8: LDD_KERNEL(ptr); break; \
80 default: __get_kernel_bad(); break; \
81 } \
82 } \
83 else { \
84 switch (sizeof(*(ptr))) { \
85 case 1: __get_user_asm("ldb",ptr); break; \
86 case 2: __get_user_asm("ldh",ptr); break; \
87 case 4: __get_user_asm("ldw",ptr); break; \
88 case 8: LDD_USER(ptr); break; \
89 default: __get_user_bad(); break; \
90 } \
91 } \
93 (x) = (__typeof__(*(ptr))) __gu_val; \
94 __gu_err; \
/*
 * Single-instruction load with an exception-table entry appended inline.
 * Table entries are .dword on 64-bit (__LP64__) and .word on 32-bit.
 * The recorded skip (2b-1b)+3 sets both low flag bits: clear r9 and put
 * -EFAULT in r8 on a fault (see the table comment above).  The _user
 * variants address memory through space register sr3; the _kernel
 * variants use no explicit space register.
 */
97 #ifdef __LP64__
98 #define __get_kernel_asm(ldx,ptr) \
99 __asm__("\n1:\t" ldx "\t0(%2),%0\n" \
100 "2:\n" \
101 "\t.section __ex_table,\"aw\"\n" \
102 "\t.dword\t1b\n" \
103 "\t.dword\t(2b-1b)+3\n" \
104 "\t.previous" \
105 : "=r"(__gu_val), "=r"(__gu_err) \
106 : "r"(ptr), "1"(__gu_err));

108 #define __get_user_asm(ldx,ptr) \
109 __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n" \
110 "2:\n" \
111 "\t.section __ex_table,\"aw\"\n" \
112 "\t.dword\t1b\n" \
113 "\t.dword\t(2b-1b)+3\n" \
114 "\t.previous" \
115 : "=r"(__gu_val), "=r"(__gu_err) \
116 : "r"(ptr), "1"(__gu_err));
117 #else
118 #define __get_kernel_asm(ldx,ptr) \
119 __asm__("\n1:\t" ldx "\t0(%2),%0\n" \
120 "2:\n" \
121 "\t.section __ex_table,\"aw\"\n" \
122 "\t.word\t1b\n" \
123 "\t.word\t(2b-1b)+3\n" \
124 "\t.previous" \
125 : "=r"(__gu_val), "=r"(__gu_err) \
126 : "r"(ptr), "1"(__gu_err));

128 #define __get_user_asm(ldx,ptr) \
129 __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n" \
130 "2:\n" \
131 "\t.section __ex_table,\"aw\"\n" \
132 "\t.word\t1b\n" \
133 "\t.word\t(2b-1b)+3\n" \
134 "\t.previous" \
135 : "=r"(__gu_val), "=r"(__gu_err) \
136 : "r"(ptr), "1"(__gu_err));
137 #endif /* !__LP64__ */
/*
 * __put_user(x,ptr): store sizeof(*(ptr)) bytes of x at ptr.
 * Only the error status (pinned to r8) needs fixing on a fault, hence
 * the store fixup entries below use only the 0x1 (-EFAULT) flag.
 * Dispatches on addr_limit like __get_user.  Evaluates to __pu_err.
 */
139 #define __put_user(x,ptr) \
140 ({ \
141 register long __pu_err __asm__ ("r8") = 0; \
143 if (segment_eq(get_fs(),KERNEL_DS)) { \
144 switch (sizeof(*(ptr))) { \
145 case 1: __put_kernel_asm("stb",x,ptr); break; \
146 case 2: __put_kernel_asm("sth",x,ptr); break; \
147 case 4: __put_kernel_asm("stw",x,ptr); break; \
148 case 8: STD_KERNEL(x,ptr); break; \
149 default: __put_kernel_bad(); break; \
152 else { \
153 switch (sizeof(*(ptr))) { \
154 case 1: __put_user_asm("stb",x,ptr); break; \
155 case 2: __put_user_asm("sth",x,ptr); break; \
156 case 4: __put_user_asm("stw",x,ptr); break; \
157 case 8: STD_USER(x,ptr); break; \
158 default: __put_user_bad(); break; \
162 __pu_err; \
/*
166 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
167 * instead of writing. This is because they do not write to any memory
168 * gcc knows about, so there are no aliasing issues.
 */

/*
 * Single-instruction store with an exception-table entry appended.
 * The skip (2b-1b)+1 sets only the low flag bit: put -EFAULT in r8 on
 * a fault, leave r9 alone.  Entries are .dword on 64-bit and .word on
 * 32-bit; the _user variants go through space register sr3.
 */
171 #ifdef __LP64__
172 #define __put_kernel_asm(stx,x,ptr) \
173 __asm__ __volatile__ ( \
174 "\n1:\t" stx "\t%2,0(%1)\n" \
175 "2:\n" \
176 "\t.section __ex_table,\"aw\"\n" \
177 "\t.dword\t1b\n" \
178 "\t.dword\t(2b-1b)+1\n" \
179 "\t.previous" \
180 : "=r"(__pu_err) \
181 : "r"(ptr), "r"(x), "0"(__pu_err))

183 #define __put_user_asm(stx,x,ptr) \
184 __asm__ __volatile__ ( \
185 "\n1:\t" stx "\t%2,0(%%sr3,%1)\n" \
186 "2:\n" \
187 "\t.section __ex_table,\"aw\"\n" \
188 "\t.dword\t1b\n" \
189 "\t.dword\t(2b-1b)+1\n" \
190 "\t.previous" \
191 : "=r"(__pu_err) \
192 : "r"(ptr), "r"(x), "0"(__pu_err))
193 #else
194 #define __put_kernel_asm(stx,x,ptr) \
195 __asm__ __volatile__ ( \
196 "\n1:\t" stx "\t%2,0(%1)\n" \
197 "2:\n" \
198 "\t.section __ex_table,\"aw\"\n" \
199 "\t.word\t1b\n" \
200 "\t.word\t(2b-1b)+1\n" \
201 "\t.previous" \
202 : "=r"(__pu_err) \
203 : "r"(ptr), "r"(x), "0"(__pu_err))

205 #define __put_user_asm(stx,x,ptr) \
206 __asm__ __volatile__ ( \
207 "\n1:\t" stx "\t%2,0(%%sr3,%1)\n" \
208 "2:\n" \
209 "\t.section __ex_table,\"aw\"\n" \
210 "\t.word\t1b\n" \
211 "\t.word\t(2b-1b)+1\n" \
212 "\t.previous" \
213 : "=r"(__pu_err) \
214 : "r"(ptr), "r"(x), "0"(__pu_err))
/*
 * 32-bit kernel helper: store a 64-bit value as two 4-byte kernel-space
 * stores (high word first at offset 0, low word at offset 4).  Each stw
 * gets its own exception-table entry carrying the +1 (-EFAULT in r8)
 * flag, as described in the table comment above.
 */
216 static inline void __put_kernel_asm64(u64 x, void *ptr)
218 u32 hi = x>>32;
219 u32 lo = x&0xffffffff;
220 __asm__ __volatile__ (
221 "\n1:\tstw %1,0(%0)\n"
222 "\n2:\tstw %2,4(%0)\n"
223 "3:\n"
224 "\t.section __ex_table,\"aw\"\n"
225 "\t.word\t1b\n"
226 "\t.word\t(3b-1b)+1\n"
227 "\t.word\t2b\n"
228 "\t.word\t(3b-2b)+1\n"
229 "\t.previous"
230 : : "r"(ptr), "r"(hi), "r"(lo));
/*
 * 32-bit user-space counterpart of __put_kernel_asm64: two 4-byte
 * stores through space register sr3, each with its own +1 (-EFAULT
 * in r8) exception-table entry.
 */
234 static inline void __put_user_asm64(u64 x, void *ptr)
236 u32 hi = x>>32;
237 u32 lo = x&0xffffffff;
238 __asm__ __volatile__ (
239 "\n1:\tstw %1,0(%%sr3,%0)\n"
240 "\n2:\tstw %2,4(%%sr3,%0)\n"
241 "3:\n"
242 "\t.section __ex_table,\"aw\"\n"
243 "\t.word\t1b\n"
244 "\t.word\t(3b-1b)+1\n"
245 "\t.word\t2b\n"
246 "\t.word\t(3b-2b)+1\n"
247 "\t.previous"
248 : : "r"(ptr), "r"(hi), "r"(lo));
252 #endif /* !__LP64__ */
/*
256 * Complex access routines -- external declarations
 */
/* Bulk copy/string helpers implemented out of line (in arch code). */
259 extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
260 extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
261 extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
262 extern long lstrncpy_from_user(char *, const char __user *, long);
263 extern unsigned lclear_user(void __user *,unsigned long);
264 extern long lstrnlen_user(const char __user *,long);
/*
267 * Complex access routines -- macros
 */
270 #define strncpy_from_user lstrncpy_from_user
271 #define strnlen_user lstrnlen_user
/* strlen_user bounds the scan at LONG_MAX for 32-bit (0x7fffffffL). */
272 #define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
273 #define clear_user lclear_user
274 #define __clear_user lclear_user
/* No access checks needed (see top of file): __ and plain forms match. */
276 #define copy_from_user lcopy_from_user
277 #define __copy_from_user lcopy_from_user
278 #define copy_to_user lcopy_to_user
279 #define __copy_to_user lcopy_to_user
280 #define copy_in_user lcopy_in_user
281 #define __copy_in_user lcopy_in_user
283 #endif /* __PARISC_UACCESS_H */