[linux-2.6.22.y-op.git] / include/asm-m68k/page.h
#ifndef _M68K_PAGE_H
#define _M68K_PAGE_H

#ifdef __KERNEL__

/* PAGE_SHIFT determines the page size */
#ifndef CONFIG_SUN3
#define PAGE_SHIFT	(12)
#else
#define PAGE_SHIFT	(13)
#endif
#ifdef __ASSEMBLY__
#define PAGE_SIZE	(1 << PAGE_SHIFT)
#else
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK	(~(PAGE_SIZE-1))
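
/*
 * Worked example: with the default PAGE_SHIFT of 12, PAGE_SIZE is 4096
 * (0x1000) and PAGE_MASK is 0xfffff000 on 32-bit m68k; with CONFIG_SUN3
 * (PAGE_SHIFT 13) they are 8192 (0x2000) and 0xffffe000.
 */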

#include <asm/setup.h>

#if PAGE_SHIFT < 13
#define THREAD_SIZE	(8192)
#else
#define THREAD_SIZE	PAGE_SIZE
#endif
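
/*
 * Either way the kernel stack is 8 KB: two 4 KB pages normally, or a
 * single 8 KB page on Sun-3.
 */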

#ifndef __ASSEMBLY__

#include <asm/module.h>

#define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
#define free_user_page(page, addr)	free_page(addr)

/*
 * We don't need to check for alignment etc.
 */
#ifdef CPU_M68040_OR_M68060_ONLY
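/*
 * copy_page() below relies on the 68040/68060 move16 instruction, which
 * copies one aligned 16-byte line per operation; two move16s per loop
 * iteration move 32 bytes, and dbra runs the loop PAGE_SIZE/32 times
 * (the counter starts at PAGE_SIZE/32 - 1 and dbra stops at -1).
 */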
static inline void copy_page(void *to, void *from)
{
	unsigned long tmp;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %1@+,%0@+\n\t"
			     "move16 %1@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "dbra  %2,1b\n\t"
			     : "=a" (to), "=a" (from), "=d" (tmp)
			     : "0" (to), "1" (from) , "2" (PAGE_SIZE / 32 - 1)
			     );
}
static inline void clear_page(void *page)
{
	unsigned long tmp;
	unsigned long *sp = page;

	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %2@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "subqw  #8,%2\n\t"
			     "subqw  #8,%2\n\t"
			     "dbra   %1,1b\n\t"
			     : "=a" (sp), "=d" (tmp)
			     : "a" (page), "0" (sp),
			       "1" ((PAGE_SIZE - 16) / 16 - 1));
}

#else
#define clear_page(page)	memset((page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
#endif

#define clear_user_page(addr, vaddr, page)	\
	do {	clear_page(addr);		\
		flush_dcache_page(page);	\
	} while (0)
#define copy_user_page(to, from, vaddr, page)	\
	do {	copy_page(to, from);		\
		flush_dcache_page(page);	\
	} while (0)
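
/*
 * Note: the vaddr argument is unused here; the macros clear or copy the
 * page through its kernel mapping and then flush the data cache for that
 * page to keep other mappings of it coherent.
 */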

/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd[16]; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((&x)->pmd[0])
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
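
/*
 * Illustrative use of the wrappers: the structs make the compiler reject
 * accidentally mixing the table-entry types, while the accessors still
 * give back the raw bits, e.g.
 *
 *	pte_t pte = __pte(0x12345678);
 *	unsigned long raw = pte_val(pte);	// raw == 0x12345678
 */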

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
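
/*
 * For example, with 4 KB pages PAGE_ALIGN(0x1234) yields 0x2000 and
 * PAGE_ALIGN(0x2000) stays 0x2000: adding PAGE_SIZE-1 and masking rounds
 * an address up to the next page boundary without moving one that is
 * already aligned.
 */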

#endif /* !__ASSEMBLY__ */

#include <asm/page_offset.h>

#define PAGE_OFFSET		(PAGE_OFFSET_RAW)

#ifndef __ASSEMBLY__

extern unsigned long m68k_memoffset;

#ifndef CONFIG_SUN3

#define WANT_PAGE_VIRTUAL
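
/*
 * ___pa()/__va() below start from an add/sub instruction with a zero
 * immediate; m68k_fixup() records the location of that immediate (1b+2)
 * in a fixup table, and early boot code patches in the real
 * m68k_memoffset once the memory layout is known.
 */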
static inline unsigned long ___pa(void *vaddr)
{
	unsigned long paddr;
	asm (
		"1:	addl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (paddr)
		: "0" (vaddr), "i" (m68k_fixup_memoffset));
	return paddr;
}
#define __pa(vaddr)	___pa((void *)(vaddr))
static inline void *__va(unsigned long paddr)
{
	void *vaddr;
	asm (
		"1:	subl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (vaddr)
		: "0" (paddr), "i" (m68k_fixup_memoffset));
	return vaddr;
}

#else	/* !CONFIG_SUN3 */

/* This #define is a horrible hack to suppress lots of warnings. --m */
#define __pa(x) ___pa((unsigned long)(x))
static inline unsigned long ___pa(unsigned long x)
{
	if (x == 0)
		return 0;
	if (x >= PAGE_OFFSET)
		return (x - PAGE_OFFSET);
	else
		return (x + 0x2000000);
}

static inline void *__va(unsigned long x)
{
	if (x == 0)
		return (void *)0;

	if (x < 0x2000000)
		return (void *)(x + PAGE_OFFSET);
	else
		return (void *)(x - 0x2000000);
}
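
/*
 * For example, on the Sun-3 layout ___pa(PAGE_OFFSET + 0x1000) is 0x1000,
 * while a virtual address below PAGE_OFFSET such as 0x1000 maps to
 * 0x2001000; __va() inverts both cases.
 */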
#endif	/* CONFIG_SUN3 */

/*
 * NOTE: virtual isn't really correct, actually it should be the offset into the
 * memory node, but we have no highmem, so that works for now.
 * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
 * of the shifts unnecessary.
 */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
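
/*
 * E.g. with 4 KB pages, a kernel address whose physical address is 0x12000
 * has virt_to_pfn() == 0x12, and pfn_to_virt(0x12) maps back to it.
 */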

extern int m68k_virt_to_node_shift;

#ifdef CONFIG_SINGLE_MEMORY_CHUNK
#define __virt_to_node(addr)	(&pg_data_map[0])
#else
extern struct pglist_data *pg_data_table[];

static inline __attribute_const__ int __virt_to_node_shift(void)
{
	int shift;

	asm (
		"1:	moveq	#0,%0\n"
		m68k_fixup(%c1, 1b)
		: "=d" (shift)
		: "i" (m68k_fixup_vnode_shift));
	return shift;
}

#define __virt_to_node(addr)	(pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
#endif
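
/*
 * The same fixup idiom as above: the moveq immediate is patched at boot
 * with m68k_virt_to_node_shift, so shifting a virtual address right by
 * that amount indexes the pg_data_table[] slot for its memory node.
 */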

#define virt_to_page(addr) ({						\
	pfn_to_page(virt_to_pfn(addr));					\
})
#define page_to_virt(page) ({						\
	pfn_to_virt(page_to_pfn(page));					\
})

#define pfn_to_page(pfn) ({						\
	unsigned long __pfn = (pfn);					\
	struct pglist_data *pgdat;					\
	pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn));	\
	pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn);		\
})
#define page_to_pfn(_page) ({						\
	struct page *__p = (_page);					\
	struct pglist_data *pgdat;					\
	pgdat = &pg_data_map[page_to_nid(__p)];				\
	((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn;		\
})
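
/*
 * A minimal sketch of how these helpers compose. The function below is
 * purely illustrative (describe_kernel_page is a hypothetical name, not a
 * kernel API) and is compiled out.
 */
#if 0
static inline struct page *describe_kernel_page(void *kaddr)
{
	unsigned long pfn = virt_to_pfn(kaddr);	/* physical address >> PAGE_SHIFT */
	struct page *page = pfn_to_page(pfn);	/* mem_map entry in the owning node */

	/* Round-tripping recovers the frame number and the page-aligned address. */
	BUG_ON(page_to_pfn(page) != pfn);
	BUG_ON(page_to_virt(page) != (void *)((unsigned long)kaddr & PAGE_MASK));

	return page;
}
#endif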

#define virt_addr_valid(kaddr)	((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
#define pfn_valid(pfn)		virt_addr_valid(pfn_to_virt(pfn))

#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/page.h>

#endif /* __KERNEL__ */

#endif /* _M68K_PAGE_H */