/*
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <asm-generic/pgtable-nopud.h>

#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/processor.h>

/* The kernel image occupies 0x400000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The TSB is mapped in the 0x8000000 to 0xa000000 range.
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x100000000 to 0x10000000000.
 * Since modules need to be in the lowest 32-bits of the address space,
 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define	TLBTEMP_BASE		_AC(0x0000000006000000,UL)
#define	TSBMAP_BASE		_AC(0x0000000008000000,UL)
#define MODULES_VADDR		_AC(0x0000000010000000,UL)
#define MODULES_LEN		_AC(0x00000000e0000000,UL)
#define MODULES_END		_AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
#define VMALLOC_START		_AC(0x0000000100000000,UL)
#define VMALLOC_END		_AC(0x0000010000000000,UL)
#define VMEMMAP_BASE		_AC(0x0000010000000000,UL)

#define vmemmap			((struct page *)VMEMMAP_BASE)

/* XXX All of this needs to be rethought so we can take advantage
 * XXX of cheetah's full 64-bit virtual address space, i.e. no more hole
 * XXX in the middle like on spitfire. -DaveM
 */

/*
 * Given a virtual address, the lowest PAGE_SHIFT bits determine offset
 * into the page; the next higher PAGE_SHIFT-3 bits determine the pte#
 * in the proper pagetable (the -3 is from the 8 byte ptes, and each page
 * table is a single page long). The next higher PMD_BITS determine pmd#
 * in the proper pmdtable (where we must have PMD_BITS <= (PAGE_SHIFT-2)
 * since the pmd entries are 4 bytes, and each pmd page is a single page
 * long). Finally, the higher few bits determine pgde#.
 */

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	(PAGE_SHIFT - 2)

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PGDIR_BITS	(PAGE_SHIFT - 2)
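
/* Worked example (illustrative only, assuming the 8K base page size,
 * i.e. PAGE_SHIFT == 13): PMD_SHIFT = 13 + 10 = 23 and PGDIR_SHIFT =
 * 23 + 11 = 34, so a virtual address decomposes as:
 *
 *	pte index = (vaddr >> 13) & (1024 - 1);		bits [22:13]
 *	pmd index = (vaddr >> 23) & (2048 - 1);		bits [33:23]
 *	pgd index = (vaddr >> 34) & (2048 - 1);		bits [44:34]
 *
 * which is what the pgd_index()/pmd_offset()/pte_index() macros further
 * down compute.
 */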

#ifndef __ASSEMBLY__

#include <linux/sched.h>

/* Entries per page directory level. */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)

/* Kernel has a separate 44-bit address space. */
#define FIRST_USER_ADDRESS	0

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#endif /* !(__ASSEMBLY__) */

/* PTE bits which are the same in SUN4U and SUN4V format. */
#define _PAGE_VALID	_AC(0x8000000000000000,UL) /* Valid TTE             */
#define _PAGE_R		_AC(0x8000000000000000,UL) /* Keep ref bit uptodate */
#define _PAGE_SPECIAL	_AC(0x0200000000000000,UL) /* Special page          */

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL) /* 512K Page            */
#define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL) /* 64K Page             */
#define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only        */
#define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
#define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
#define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
#define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
#define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits        */
#define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
#define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
#define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
#define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL) /* Software bits:       */
#define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL) /* Executable SW bit    */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)     */
#define _PAGE_FILE_4U	  _AC(0x0000000000000800,UL) /* Pagecache page       */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4U	  _AC(0x0000000000000200,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL) /* Writable SW Bit      */
#define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present              */
#define _PAGE_L_4U	  _AC(0x0000000000000040,UL) /* Locked TTE           */
#define _PAGE_CP_4U	  _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4U	  _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
#define _PAGE_E_4U	  _AC(0x0000000000000008,UL) /* side-Effect          */
#define _PAGE_P_4U	  _AC(0x0000000000000004,UL) /* Privileged Page      */
#define _PAGE_W_4U	  _AC(0x0000000000000002,UL) /* Writable             */

/* SUN4V pte bits... */
#define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL) /* No Fault Only        */
#define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)     */
#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
#define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
#define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
#define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
#define _PAGE_CP_4V	  _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4V	  _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
#define _PAGE_P_4V	  _AC(0x0000000000000100,UL) /* Privileged Page      */
#define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL) /* Executable Page      */
#define _PAGE_W_4V	  _AC(0x0000000000000040,UL) /* Writable             */
#define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL) /* Software bits        */
#define _PAGE_FILE_4V	  _AC(0x0000000000000020,UL) /* Pagecache page       */
#define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present              */
#define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL) /* Reserved             */
#define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL) /* 16GB Page            */
#define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL) /* 2GB Page             */
#define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page           */
#define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL) /* 32MB Page            */
#define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
#define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
#define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits        */

#if PAGE_SHIFT == 13
#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V
#elif PAGE_SHIFT == 16
#define _PAGE_SZBITS_4U	_PAGE_SZ64K_4U
#define _PAGE_SZBITS_4V	_PAGE_SZ64K_4V
#else
#error Wrong PAGE_SHIFT specified
#endif

#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define _PAGE_SZHUGE_4U	_PAGE_SZ512K_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ512K_4V
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE_4U	_PAGE_SZ64K_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ64K_4V
#endif

/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
#define __P000	__pgprot(0)
#define __P001	__pgprot(0)
#define __P010	__pgprot(0)
#define __P011	__pgprot(0)
#define __P100	__pgprot(0)
#define __P101	__pgprot(0)
#define __P110	__pgprot(0)
#define __P111	__pgprot(0)

#define __S000	__pgprot(0)
#define __S001	__pgprot(0)
#define __S010	__pgprot(0)
#define __S011	__pgprot(0)
#define __S100	__pgprot(0)
#define __S101	__pgprot(0)
#define __S110	__pgprot(0)
#define __S111	__pgprot(0)

#ifndef __ASSEMBLY__

extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);

extern unsigned long pte_sz_bits(unsigned long size);

extern pgprot_t PAGE_KERNEL;
extern pgprot_t PAGE_KERNEL_LOCKED;
extern pgprot_t PAGE_COPY;
extern pgprot_t PAGE_SHARED;

/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
extern unsigned long _PAGE_IE;
extern unsigned long _PAGE_E;
extern unsigned long _PAGE_CACHE;

extern unsigned long pg_iobits;
extern unsigned long _PAGE_ALL_SZ_BITS;
extern unsigned long _PAGE_SZBITS;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* PFNs are real physical page numbers. However, mem_map only begins to record
 * per-page information starting at pfn_base. This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB. This is common on a partitioned E10000, for example.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long paddr = pfn << PAGE_SHIFT;
	unsigned long sz_bits;

	sz_bits = 0UL;
	if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
		__asm__ __volatile__(
		"\n661:	sethi		%%uhi(%1), %0\n"
		"	sllx		%0, 32, %0\n"
		"	.section	.sun4v_2insn_patch, \"ax\"\n"
		"	.word		661b\n"
		"	mov		%2, %0\n"
		"	nop\n"
		"	.previous\n"
		: "=r" (sz_bits)
		: "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V));
	}
	return __pte(paddr | sz_bits | pgprot_val(prot));
}
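
/* A note on the 661: labels and .sun4v_{1,2}insn_patch sections used
 * above and throughout this file (a sketch of the mechanism, not a
 * definition of it): each patch entry records the address of the
 * instruction(s) at label 661 along with replacement instruction(s).
 * When the kernel finds itself on a sun4v (hypervisor) machine at boot,
 * it overwrites the sun4u sequence in place with the sun4v one, so no
 * runtime conditional is needed in these hot paths.
 */
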
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* This one can be done with two shifts. */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long ret;

	__asm__ __volatile__(
	"\n661:	sllx		%1, %2, %0\n"
	"	srlx		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sllx		%1, %4, %0\n"
	"	srlx		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (ret)
	: "r" (pte_val(pte)),
	  "i" (21), "i" (21 + PAGE_SHIFT),
	  "i" (8), "i" (8 + PAGE_SHIFT));

	return ret;
}
#define pte_page(x)	pfn_to_page(pte_pfn(x))
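
/* Arithmetic behind the two shifts above (illustrative, with the 8K
 * page size): on sun4u the physical address lives in pte bits [42:13],
 * so (pte << 21) discards bits 63..43 and the (>> (21 + 13)) leaves
 * paddr >> 13, i.e. the pfn. On sun4v the paddr field is bits [55:13],
 * hence the patched shift pair of 8 and 8 + PAGE_SHIFT.
 */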

static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	unsigned long mask, tmp;

	/* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
	 * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
	 *
	 * Even if we use negation tricks the result is still a 6
	 * instruction sequence, so don't try to play fancy and just
	 * do the most straightforward implementation.
	 *
	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
	 */

	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%2), %1\n"
	"	sethi		%%hi(%2), %0\n"
	"\n662:	or		%1, %%ulo(%2), %1\n"
	"	or		%0, %%lo(%2), %0\n"
	"\n663:	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%3), %1\n"
	"	sethi		%%hi(%3), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%3), %1\n"
	"	or		%0, %%lo(%3), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (mask), "=r" (tmp)
	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
	       _PAGE_SZBITS_4U | _PAGE_SPECIAL),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
	       _PAGE_SZBITS_4V | _PAGE_SPECIAL));

	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}

static inline pte_t pgoff_to_pte(unsigned long off)
{
	off <<= PAGE_SHIFT;

	__asm__ __volatile__(
	"\n661:	or		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	or		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (off)
	: "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));

	return __pte(off);
}

static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	unsigned long val = pgprot_val(prot);

	__asm__ __volatile__(
	"\n661:	andn		%0, %2, %0\n"
	"	or		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %4, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
	             "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));

	return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached". That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached
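
/* Typical use (a hypothetical driver sketch, not an API defined here):
 * a device mmap() handler strips the cacheable bits and sets the
 * side-effect bit before remapping, e.g.:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  size, vma->vm_page_prot);
 */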

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%1), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	nop\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));

	return __pte(pte_val(pte) | mask);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	or		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return __pte(val | mask);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
	  "i" (_PAGE_WRITE_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkold(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) & ~mask);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) | mask);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

static inline unsigned long pte_young(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_dirty(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_write(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_exec(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%hi(%1), %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_file(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	and		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));

	return val;
}

static inline unsigned long pte_present(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	and		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));

	return val;
}

static inline unsigned long pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

#define pmd_set(pmdp, ptep)	\
	(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
#define pud_set(pudp, pmdp)	\
	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
#define __pmd_page(pmd)		\
	((unsigned long) __va((((unsigned long)pmd_val(pmd))<<11UL)))
#define pmd_page(pmd)			virt_to_page((void *)__pmd_page(pmd))
#define pud_page_vaddr(pud)	\
	((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
#define pud_page(pud)			virt_to_page((void *)pud_page_vaddr(pud))
#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(0)
#define pmd_present(pmd)		(pmd_val(pmd) != 0U)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0U)
#define pud_none(pud)			(!pud_val(pud))
#define pud_bad(pud)			(0)
#define pud_present(pud)		(pud_val(pud) != 0U)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0U)

/* Same in both SUN4V and SUN4U. */
#define pte_none(pte)			(!pte_val(pte))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
#define pmd_offset(pudp, address)	\
	((pmd_t *) pud_page_vaddr(*(pudp)) + \
	 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))

/* Find an entry in the third-level page table.. */
#define pte_index(dir, address)	\
	((pte_t *) __pmd_page(*(dir)) + \
	 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_kernel		pte_index
#define pte_offset_map			pte_index
#define pte_unmap(pte)			do { } while (0)

/* Actual page table PTE updates. */
extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
			  pte_t *ptep, pte_t orig, int fullmm);

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;

	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 *
	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
	 *             and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
		tlb_batch_add(mm, addr, ptep, orig, fullmm);
}

#define set_pte_at(mm,addr,ptep,pte)	\
	__set_pte_at((mm), (addr), (ptep), (pte), 0)

#define pte_clear(mm,addr,ptep)		\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (tlb_type != hypervisor && pte_present(pte)) {		\
		unsigned long this_pfn = pte_pfn(pte);			\
									\
		if (pfn_valid(this_pfn) &&				\
		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
			flush_dcache_page_all(current->mm,		\
					      pfn_to_page(this_pfn));	\
	}								\
	newpte;								\
})
#endif
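
/* Why bit 13 above: with 8K pages and the 16K virtually-indexed
 * D-cache on pre-hypervisor UltraSPARC parts, virtual address bit 13
 * selects the cache color. Moving a page between two virtual addresses
 * that differ in that bit can leave stale lines in the other color,
 * hence the flush when the old and new addresses disagree there.
 */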

extern pgd_t swapper_pg_dir[2048];
extern pmd_t swapper_low_pmd_dir[2048];

extern void paging_init(void);
extern unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
extern void mmu_info(struct seq_file *);

/* These do nothing with the way I have things setup. */
#define mmu_lockarea(vaddr, len)	(vaddr)
#define mmu_unlockarea(vaddr, len)	do { } while(0)

struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t)			\
	  {				\
		(((long)(type) << PAGE_SHIFT) | \
		 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
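
/* Illustrative layout with the 8K page size: __swp_entry() leaves the
 * low 13 bits clear (so the entry can never look valid), puts the
 * 8-bit swap type in bits [20:13], and the offset above that:
 *
 *	__swp_entry(3, 0x10)	== 0x0000000002006000
 *	__swp_type(...)		== 3
 *	__swp_offset(...)	== 0x10
 */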

/* File offset in PTE support. */
extern unsigned long pte_file(pte_t);
#define pte_to_pgoff(pte)	(pte_val(pte) >> PAGE_SHIFT)
extern pte_t pgoff_to_pte(unsigned long);
#define PTE_FILE_MAX_BITS	(64UL - PAGE_SHIFT - 1UL)
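
/* Round-trip sketch: pgoff_to_pte() shifts the page offset up by
 * PAGE_SHIFT and ORs in the (boot-patched) _PAGE_FILE bit, which lives
 * within the low PAGE_SHIFT bits on both sun4u and sun4v;
 * pte_to_pgoff() simply shifts it back down. So for any
 * off < (1UL << PTE_FILE_MAX_BITS):
 *
 *	pte_to_pgoff(pgoff_to_pte(off)) == off
 */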

extern unsigned long sparc64_valid_addr_bitmap[];

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
static inline bool kern_addr_valid(unsigned long addr)
{
	unsigned long paddr = __pa(addr);

	if ((paddr >> 41UL) != 0UL)
		return false;
	return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
}
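
/* Arithmetic above: paddr >> 22 indexes one bit per 4MB physical chunk,
 * and paddr >> 41 rejects anything beyond the 2^41 bytes the bitmap
 * covers (so sparc64_valid_addr_bitmap[] holds 2^19 bits).
 */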

extern int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits. These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
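
/* Worked example (illustrative values): with BITS_PER_LONG == 64 the
 * iospace tag occupies the top four bits, so for space 2 and pfn 0x1000:
 *
 *	MK_IOSPACE_PFN(2, 0x1000)	== 0x2000000000001000
 *	GET_IOSPACE(0x2000000000001000)	== 2
 *	GET_PFN(0x2000000000001000)	== 0x1000
 */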

extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
			   unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	int space = GET_IOSPACE(pfn);
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
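
/* Decoding sketch, continuing the hypothetical values above (8K pages):
 * for pfn == MK_IOSPACE_PFN(2, 0x1000), offset becomes
 * 0x1000 << 13 == 0x2000000, space == 2, and phys_base ==
 * 0x2000000 | (2UL << 32) == 0x0000000202000000, whose pfn
 * (phys_base >> 13) is what finally reaches remap_pfn_range().
 */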

#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

extern void pgtable_cache_init(void);
extern void sun4v_register_fault_status(void);
extern void sun4v_ktsb_register(void);
extern void __init cheetah_ecache_flush_init(void);
extern void sun4v_patch_tlb_handlers(void);

extern unsigned long cmdline_memory_size;

extern asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */