- Alan Cox: synch. PA-RISC arch and bitops cleanups
[davej-history.git] / include / asm-parisc / uaccess.h
blob0d825d0ce86dfb40e2543c42f998021d58809526
1 #ifndef __PARISC_UACCESS_H
2 #define __PARISC_UACCESS_H
4 /*
5 * User space memory access functions
6 */
7 #include <linux/config.h>
8 #include <linux/sched.h>
9 #include <asm/page.h>
10 #include <asm/system.h>
11 #include <asm/cache.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * Address-limit descriptors.  On parisc the kernel lives in a separate
 * address space (see the access_ok() note below), so a single 0/1 flag
 * in mm_segment_t is enough to tell KERNEL_DS from USER_DS.
 */
#define KERNEL_DS ((mm_segment_t){0})
#define USER_DS ((mm_segment_t){1})

/* True when two segment descriptors name the same address space. */
#define segment_eq(a,b) ((a).seg == (b).seg)

#define get_ds() (KERNEL_DS)
/* The current task's address limit is kept in the task struct. */
#define get_fs() (current->addr_limit)
#define set_fs(x) (current->addr_limit = (x))
/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok() or verify_area().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */
#define access_ok(type,addr,size) (1)
#define verify_area(type,addr,size) (0)

#define put_user __put_user
#define get_user __get_user
/*
 * The exception table contains two values: the first is an address
 * for an instruction that is allowed to fault, and the second is
 * the number of bytes to skip if a fault occurs. We also support in
 * two bit flags: 0x2 tells the exception handler to clear register
 * r9 and 0x1 tells the exception handler to put -EFAULT in r8.
 * This allows us to handle the simple cases for put_user and
 * get_user without having to have .fixup sections.
 */
struct exception_table_entry {
	unsigned long addr;	/* address of insn that is allowed to fault. */
	int skip;		/* pcoq skip | r9 clear flag | r8 -EFAULT flag */
};

/* Lookup of the fixup entry for a faulting address; defined elsewhere. */
extern const struct exception_table_entry
	*search_exception_table(unsigned long addr);
/*
 * __get_user(x,ptr): fetch *ptr into x, honouring the current address
 * limit -- KERNEL_DS uses the plain kernel space, anything else goes
 * through %sr3 via __get_user_asm.  The value lands in fixed register
 * r9 (__gu_val) and the error code in r8 (__gu_err), the registers the
 * exception handler knows to patch on a fault (see exception table
 * comment above).  Evaluates to 0 on success, -EFAULT on fault.
 */
#define __get_user(x,ptr)                                       \
({                                                              \
	register long __gu_err __asm__ ("r8") = 0;              \
	register long __gu_val __asm__ ("r9") = 0;              \
                                                                \
	if (segment_eq(get_fs(),KERNEL_DS)) {                   \
	    switch (sizeof(*(ptr))) {                           \
		case 1: __get_kernel_asm("ldb",ptr); break;     \
		case 2: __get_kernel_asm("ldh",ptr); break;     \
		case 4: __get_kernel_asm("ldw",ptr); break;     \
		case 8: __get_kernel_asm("ldd",ptr); break;     \
		default: BUG(); break;                          \
	    }                                                   \
	}                                                       \
	else {                                                  \
	    switch (sizeof(*(ptr))) {                           \
		case 1: __get_user_asm("ldb",ptr); break;       \
		case 2: __get_user_asm("ldh",ptr); break;       \
		case 4: __get_user_asm("ldw",ptr); break;       \
		case 8: __get_user_asm("ldd",ptr); break;       \
		default: BUG(); break;                          \
	    }                                                   \
	}                                                       \
                                                                \
	(x) = (__typeof__(*(ptr))) __gu_val;                    \
	__gu_err;                                               \
})
/*
 * Kernel-space load of *ptr into __gu_val.  "ldx" is the size-specific
 * load opcode (ldb/ldh/ldw/ldd, chosen by __get_user).  The __ex_table
 * entry records the faulting insn (1b) and the skip (2b-1b) plus flag
 * bits 0x2|0x1 (the "+3"): on a fault, clear r9 (__gu_val) and put
 * -EFAULT in r8 (__gu_err), per the exception table comment above.
 */
#define __get_kernel_asm(ldx,ptr) \
	__asm__("\n1:\t" ldx "\t0(%2),%0\n" \
		"2:\n" \
		"\t.section __ex_table,\"a\"\n" \
		"\t.word\t1b\n" \
		"\t.word\t(2b-1b)+3\n" \
		"\t.previous" \
		: "=r"(__gu_val), "=r"(__gu_err) \
		: "r"(ptr), "1"(__gu_err));
/*
 * Load of *ptr into __gu_val through space register %sr3 (used for the
 * non-KERNEL_DS case in __get_user).  Same fixup encoding as
 * __get_kernel_asm: (2b-1b)+3 = skip plus flags 0x2 (clear r9) and
 * 0x1 (-EFAULT into r8).
 */
#define __get_user_asm(ldx,ptr) \
	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n" \
		"2:\n" \
		"\t.section __ex_table,\"a\"\n" \
		"\t.word\t1b\n" \
		"\t.word\t(2b-1b)+3\n" \
		"\t.previous" \
		: "=r"(__gu_val), "=r"(__gu_err) \
		: "r"(ptr), "1"(__gu_err));
/*
 * __put_user(x,ptr): store x to *ptr, honouring the current address
 * limit -- KERNEL_DS stores go to kernel space, anything else through
 * %sr3 via __put_user_asm.  The error code comes back in fixed register
 * r8 (__pu_err), which the exception handler sets to -EFAULT on a
 * fault (flag 0x1 in the exception table entry).  Evaluates to 0 on
 * success, -EFAULT on fault.
 */
#define __put_user(x,ptr)                                       \
({                                                              \
	register long __pu_err __asm__ ("r8") = 0;              \
                                                                \
	if (segment_eq(get_fs(),KERNEL_DS)) {                   \
	    switch (sizeof(*(ptr))) {                           \
		case 1: __put_kernel_asm("stb",x,ptr); break;   \
		case 2: __put_kernel_asm("sth",x,ptr); break;   \
		case 4: __put_kernel_asm("stw",x,ptr); break;   \
		case 8: __put_kernel_asm("std",x,ptr); break;   \
		default: BUG(); break;                          \
	    }                                                   \
	}                                                       \
	else {                                                  \
	    switch (sizeof(*(ptr))) {                           \
		case 1: __put_user_asm("stb",x,ptr); break;     \
		case 2: __put_user_asm("sth",x,ptr); break;     \
		case 4: __put_user_asm("stw",x,ptr); break;     \
		case 8: __put_user_asm("std",x,ptr); break;     \
		default: BUG(); break;                          \
	    }                                                   \
	}                                                       \
                                                                \
	__pu_err;                                               \
})
/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues.
 */
/*
 * Kernel-space store of x to *ptr.  "stx" is the size-specific store
 * opcode (stb/sth/stw/std, chosen by __put_user).  Fixup word is
 * (2b-1b)+1: skip plus flag 0x1 only, i.e. -EFAULT into r8 on a fault;
 * stores produce no value, so there is no r9 to clear.  __volatile__
 * keeps gcc from discarding the asm, which it otherwise only sees as
 * a read (per the comment above).
 */
#define __put_kernel_asm(stx,x,ptr) \
	__asm__ __volatile__ ( \
		"\n1:\t" stx "\t%2,0(%1)\n" \
		"2:\n" \
		"\t.section __ex_table,\"a\"\n" \
		"\t.word\t1b\n" \
		"\t.word\t(2b-1b)+1\n" \
		"\t.previous" \
		: "=r"(__pu_err) \
		: "r"(ptr), "r"(x), "0"(__pu_err))
/*
 * Store of x to *ptr through space register %sr3 (the non-KERNEL_DS
 * case in __put_user).  Same fixup encoding as __put_kernel_asm:
 * (2b-1b)+1 = skip plus flag 0x1 (-EFAULT into r8).  __volatile__ is
 * required since gcc is only told this asm reads memory.
 */
#define __put_user_asm(stx,x,ptr) \
	__asm__ __volatile__ ( \
		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n" \
		"2:\n" \
		"\t.section __ex_table,\"a\"\n" \
		"\t.word\t1b\n" \
		"\t.word\t(2b-1b)+1\n" \
		"\t.previous" \
		: "=r"(__pu_err) \
		: "r"(ptr), "r"(x), "0"(__pu_err))
/*
 * Complex access routines -- external declarations
 */

/* Declarations only; the implementations live outside this header. */
extern unsigned long lcopy_to_user(void *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void *, unsigned long);
extern long lstrncpy_from_user(char *, const char *, long);
extern unsigned lclear_user(void *,unsigned long);
extern long lstrnlen_user(const char *,long);
/*
 * Complex access routines -- macros
 *
 * Thin aliases onto the l* helpers above.  Because access_ok() is
 * always true here, the checked and double-underscore variants are
 * the same thing.
 */
#define strncpy_from_user lstrncpy_from_user
#define strnlen_user lstrnlen_user
/* strlen_user bounds the scan with the largest positive 32-bit long. */
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user

#define copy_from_user lcopy_from_user
#define __copy_from_user lcopy_from_user
#define copy_to_user lcopy_to_user
#define __copy_to_user lcopy_to_user
/*
 * copy_{to,from}_user_ret: perform the copy and, if any bytes were not
 * copied, execute "return retval".  Note the "return" is inside a
 * statement expression, so it returns from the function that uses the
 * macro, not from the macro itself.
 */
#define copy_to_user_ret(to,from,n,retval) \
	({ if (lcopy_to_user(to,from,n)) return retval; })

#define copy_from_user_ret(to,from,n,retval) \
	({ if (lcopy_from_user(to,from,n)) return retval; })
190 #endif /* __PARISC_UACCESS_H */