/* $Id: pgtable.h,v 1.96 1998/10/27 23:28:42 davem Exp $
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#ifndef __ASSEMBLY__
#include <linux/mm.h>
#endif
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/mmu_context.h>
#include <asm/system.h>

#ifndef __ASSEMBLY__
#include <asm/sbus.h>

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
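
/* Typically a new TTE is composed with one of the mk_pte*() macros defined
 * below and then installed through this hook, e.g. (illustrative only):
 *
 *      set_pte(ptep, mk_pte(page, PAGE_KERNEL));
 */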

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + (PAGE_SHIFT-3) + (PAGE_SHIFT-2))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
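
/* With the sparc64 8K base page (PAGE_SHIFT == 13) this works out to:
 *
 *      PMD_SHIFT   = 13 + 10      = 23  -> a second-level table maps 8MB
 *      PGDIR_SHIFT = 13 + 10 + 11 = 34  -> a top-level entry maps 16GB
 */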

/* Entries per page directory level. */
#define PTRS_PER_PTE            (1UL << (PAGE_SHIFT-3))

/* Unlike the first one in this file, what we export to the kernel here is
 * different from the real value, so that we can optimize correctly for
 * 32-bit tasks.
 */
#define REAL_PTRS_PER_PMD       (1UL << (PAGE_SHIFT-2))
#define PTRS_PER_PMD            ((const int)((current->tss.flags & SPARC_FLAG_32BIT) ? \
                                 (REAL_PTRS_PER_PMD >> 2) : (REAL_PTRS_PER_PMD)))

/* We cannot use the top 16G because the VPTE table lives there. */
#define PTRS_PER_PGD            ((1UL << (PAGE_SHIFT-3))-1)

/* The kernel has a separate 44-bit address space. */
#define USER_PTRS_PER_PGD       ((const int)((current->tss.flags & SPARC_FLAG_32BIT) ? \
                                 (1) : (PTRS_PER_PGD)))

#define PTE_TABLE_SIZE  0x2000  /* 1024 entries, 8 bytes each */
#define PMD_TABLE_SIZE  0x2000  /* 2048 entries, 4 bytes each */
#define PGD_TABLE_SIZE  0x1000  /* 1024 entries, 4 bytes each */

/* The number of pointers that fit on a page. */
#define PTRS_PER_PAGE   (1UL << (PAGE_SHIFT-3))

/* NOTE: TLB miss handlers depend heavily upon where this is. */
#define VMALLOC_START           0x0000000140000000UL
#define VMALLOC_VMADDR(x)       ((unsigned long)(x))
#define VMALLOC_END             0x0000000200000000UL
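
/* That leaves 0xc0000000 bytes (3GB) of vmalloc space between
 * VMALLOC_START and VMALLOC_END.
 */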

#endif /* !(__ASSEMBLY__) */

/* SpitFire TTE bits. */
#define _PAGE_VALID     0x8000000000000000      /* Valid TTE                          */
#define _PAGE_R         0x8000000000000000      /* Used to keep ref bit up to date    */
#define _PAGE_SZ4MB     0x6000000000000000      /* 4MB Page                           */
#define _PAGE_SZ512K    0x4000000000000000      /* 512K Page                          */
#define _PAGE_SZ64K     0x2000000000000000      /* 64K Page                           */
#define _PAGE_SZ8K      0x0000000000000000      /* 8K Page                            */
#define _PAGE_NFO       0x1000000000000000      /* No Fault Only                      */
#define _PAGE_IE        0x0800000000000000      /* Invert Endianness                  */
#define _PAGE_SOFT2     0x07FC000000000000      /* Second set of software bits        */
#define _PAGE_DIAG      0x0003FE0000000000      /* Diagnostic TTE bits                */
#define _PAGE_PADDR     0x000001FFFFFFE000      /* Physical Address bits [40:13]      */
#define _PAGE_SOFT      0x0000000000001F80      /* First set of software bits         */
#define _PAGE_L         0x0000000000000040      /* Locked TTE                         */
#define _PAGE_CP        0x0000000000000020      /* Cacheable in Physical Cache        */
#define _PAGE_CV        0x0000000000000010      /* Cacheable in Virtual Cache         */
#define _PAGE_E         0x0000000000000008      /* side-Effect                        */
#define _PAGE_P         0x0000000000000004      /* Privileged Page                    */
#define _PAGE_W         0x0000000000000002      /* Writable                           */
#define _PAGE_G         0x0000000000000001      /* Global                             */

/* Here are the SpitFire software bits we use in the TTE's. */
#define _PAGE_MODIFIED  0x0000000000000800      /* Modified Page (ie. dirty)          */
#define _PAGE_ACCESSED  0x0000000000000400      /* Accessed Page (ie. referenced)     */
#define _PAGE_READ      0x0000000000000200      /* Readable SW Bit                    */
#define _PAGE_WRITE     0x0000000000000100      /* Writable SW Bit                    */
#define _PAGE_PRESENT   0x0000000000000080      /* Present Page (ie. not swapped out) */

#define _PAGE_CACHE     (_PAGE_CP | _PAGE_CV)

#define __DIRTY_BITS    (_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W)
#define __ACCESS_BITS   (_PAGE_ACCESSED | _PAGE_READ | _PAGE_R)
#define __PRIV_BITS     _PAGE_P

#define PAGE_NONE       __pgprot (_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED     __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
                                  __ACCESS_BITS | _PAGE_W | _PAGE_WRITE)

#define PAGE_COPY       __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
                                  __ACCESS_BITS)

#define PAGE_READONLY   __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
                                  __ACCESS_BITS)

#define PAGE_KERNEL     __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
                                  __PRIV_BITS | __ACCESS_BITS | __DIRTY_BITS)

#define PAGE_INVALID    __pgprot (0)

#define _PFN_MASK       _PAGE_PADDR

#define _PAGE_CHG_MASK  (_PFN_MASK | _PAGE_MODIFIED | _PAGE_ACCESSED | _PAGE_PRESENT)

#define pg_iobits       (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | __ACCESS_BITS | _PAGE_E)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
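
/* The __PXXX entries above are used for private (copy-on-write) mappings and
 * the __SXXX entries for shared ones; the three digits correspond to the
 * exec/write/read permission bits of the mapping.  For instance, a private
 * PROT_READ|PROT_WRITE mapping gets __P011 == PAGE_COPY, which leaves
 * _PAGE_WRITE/_PAGE_W clear so the first store faults and can be handled as
 * copy-on-write.
 */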

#ifndef __ASSEMBLY__

extern pte_t __bad_page(void);

#define BAD_PAGE        __bad_page()

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
#define ZERO_PAGE       ((unsigned long)__va(phys_base))

/* Allocate a block of RAM which is aligned to its size.
 * This procedure can be used until the call to mem_init().
 */
extern void *sparc_init_alloc(unsigned long *kbrk, unsigned long size);

/* Cache and TLB flush operations. */

/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(mm)                      flushw_user()
#define flush_cache_range(mm, start, end)       flushw_user()
#define flush_cache_page(vma, page)             flushw_user()

/* These operations are unnecessary on the SpitFire since D-CACHE is write-through. */
#define flush_icache_range(start, end)          do { } while (0)
#define flush_page_to_ram(page)                 do { } while (0)

extern void __flush_dcache_range(unsigned long start, unsigned long end);

extern void __flush_cache_all(void);

extern void __flush_tlb_all(void);
extern void __flush_tlb_mm(unsigned long context, unsigned long r);
extern void __flush_tlb_range(unsigned long context, unsigned long start,
                              unsigned long r, unsigned long end,
                              unsigned long pgsz, unsigned long size);
extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);

#ifndef __SMP__

#define flush_cache_all()       __flush_cache_all()
#define flush_tlb_all()         __flush_tlb_all()

#define flush_tlb_mm(mm) \
do {    if((mm)->context != NO_CONTEXT) \
                __flush_tlb_mm((mm)->context & 0x3ff, SECONDARY_CONTEXT); \
} while(0)

#define flush_tlb_range(mm, start, end) \
do {    if((mm)->context != NO_CONTEXT) { \
                unsigned long __start = (start)&PAGE_MASK; \
                unsigned long __end = (end)&PAGE_MASK; \
                __flush_tlb_range((mm)->context & 0x3ff, __start, \
                                  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
                                  (__end - __start)); \
        } \
} while(0)

#define flush_tlb_page(vma, page) \
do {    struct mm_struct *__mm = (vma)->vm_mm; \
        if(__mm->context != NO_CONTEXT) \
                __flush_tlb_page(__mm->context & 0x3ff, (page)&PAGE_MASK, \
                                 SECONDARY_CONTEXT); \
} while(0)

#else /* __SMP__ */

extern void smp_flush_cache_all(void);
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);

#define flush_cache_all()       smp_flush_cache_all()
#define flush_tlb_all()         smp_flush_tlb_all()

extern __inline__ void flush_tlb_mm(struct mm_struct *mm)
{
        if(mm->context != NO_CONTEXT)
                smp_flush_tlb_mm(mm);
}

extern __inline__ void flush_tlb_range(struct mm_struct *mm, unsigned long start,
                                       unsigned long end)
{
        if(mm->context != NO_CONTEXT)
                smp_flush_tlb_range(mm, start, end);
}

extern __inline__ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if(mm->context != NO_CONTEXT)
                smp_flush_tlb_page(mm, page);
}

#endif

#define mk_pte(page, pgprot)            (__pte(__pa(page) | pgprot_val(pgprot)))
#define mk_pte_phys(physpage, pgprot)   (__pte((physpage) | pgprot_val(pgprot)))
#define pte_modify(_pte, newprot) \
        (pte_val(_pte) = ((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
#define pmd_set(pmdp, ptep)     (pmd_val(*(pmdp)) = __pa((unsigned long) (ptep)))
#define pgd_set(pgdp, pmdp)     (pgd_val(*(pgdp)) = __pa((unsigned long) (pmdp)))
#define pte_page(pte)           ((unsigned long) __va(((pte_val(pte)&~PAGE_OFFSET)&~(0xfffUL))))
#define pmd_page(pmd)           ((unsigned long) __va(pmd_val(pmd)))
#define pgd_page(pgd)           ((unsigned long) __va(pgd_val(pgd)))
#define pte_none(pte)           (!pte_val(pte))
#define pte_present(pte)        (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(pte)          (pte_val(*(pte)) = 0UL)
#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            (0)
#define pmd_present(pmd)        (pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)         (pmd_val(*(pmdp)) = 0UL)
#define pgd_none(pgd)           (!pgd_val(pgd))
#define pgd_bad(pgd)            (0)
#define pgd_present(pgd)        (pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)         (pgd_val(*(pgdp)) = 0UL)

/* The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_read(pte)           (pte_val(pte) & _PAGE_READ)
#define pte_write(pte)          (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)          (pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte)          (pte_val(pte) & _PAGE_ACCESSED)
#define pte_wrprotect(pte)      (__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W)))
#define pte_rdprotect(pte)      (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ))
#define pte_mkclean(pte)        (__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W)))
#define pte_mkold(pte)          (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))

/* Be very careful when you change these three, they are delicate. */
static __inline__ pte_t pte_mkyoung(pte_t _pte)
{       if(pte_val(_pte) & _PAGE_READ)
                return __pte(pte_val(_pte)|(_PAGE_ACCESSED|_PAGE_R));
        else
                return __pte(pte_val(_pte)|(_PAGE_ACCESSED));
}

static __inline__ pte_t pte_mkwrite(pte_t _pte)
{       if(pte_val(_pte) & _PAGE_MODIFIED)
                return __pte(pte_val(_pte)|(_PAGE_WRITE|_PAGE_W));
        else
                return __pte(pte_val(_pte)|(_PAGE_WRITE));
}

static __inline__ pte_t pte_mkdirty(pte_t _pte)
{       if(pte_val(_pte) & _PAGE_WRITE)
                return __pte(pte_val(_pte)|(_PAGE_MODIFIED|_PAGE_W));
        else
                return __pte(pte_val(_pte)|(_PAGE_MODIFIED));
}
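
/* The delicacy: _PAGE_ACCESSED, _PAGE_MODIFIED, _PAGE_READ and _PAGE_WRITE
 * are software-only bits, while _PAGE_R and _PAGE_W are what the hardware
 * actually honors.  Each hardware bit is only turned on once the matching
 * software permission bit is already set, so (roughly speaking) marking a
 * pte young and dirty on a write fault also sets _PAGE_W, and the next
 * store no longer traps.
 */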

/* to find an entry in a page-table-directory. */
#define pgd_offset(mm, address) ((mm)->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, address) ((pmd_t *) pgd_page(*(dir)) + \
                                  ((address >> PMD_SHIFT) & (REAL_PTRS_PER_PMD-1)))

/* Find an entry in the third-level page table.. */
#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
                                  ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
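
/* A full three-level lookup is just the composition of these:
 * pgd_offset()/pgd_offset_k(), then pmd_offset(), then pte_offset().
 * See sun4u_get_pte() near the end of this file for a complete example.
 */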

/* Very stupidly, we used to get new pgd's and pmd's, init their contents
 * to point to the NULL versions of the next level page table, later on
 * completely re-init them the same way, then free them up.  This wasted
 * a lot of work and caused unnecessary memory traffic.  How broken...
 * We fix this by caching them.
 */

#ifdef __SMP__
/* Sliiiicck */
#define pgt_quicklists  cpu_data[smp_processor_id()]
#else
extern struct pgtable_cache_struct {
        unsigned long *pgd_cache;
        unsigned long *pte_cache;
        unsigned long pgcache_size;
        unsigned long pgdcache_size;
} pgt_quicklists;
#endif
#define pgd_quicklist           (pgt_quicklists.pgd_cache)
#define pmd_quicklist           ((unsigned long *)0)
#define pte_quicklist           (pgt_quicklists.pte_cache)
#define pgtable_cache_size      (pgt_quicklists.pgcache_size)
#define pgd_cache_size          (pgt_quicklists.pgdcache_size)

#ifndef __SMP__

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
        struct page *page = mem_map + MAP_NR(pgd);

        if (!page->pprev_hash) {
                (unsigned long *)page->next_hash = pgd_quicklist;
                pgd_quicklist = (unsigned long *)page;
        }
        (unsigned long)page->pprev_hash |=
                (((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1);
        pgd_cache_size++;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
        struct page *ret;

        if ((ret = (struct page *)pgd_quicklist) != NULL) {
                unsigned long mask = (unsigned long)ret->pprev_hash;
                unsigned long off = 0;

                if (mask & 1)
                        mask &= ~1;
                else {
                        off = PAGE_SIZE / 2;
                        mask &= ~2;
                }
                (unsigned long)ret->pprev_hash = mask;
                if (!mask)
                        pgd_quicklist = (unsigned long *)ret->next_hash;
                ret = (struct page *) (page_address(ret) + off);
                pgd_cache_size--;
        } else {
                ret = (struct page *) __get_free_page(GFP_KERNEL);
                if(ret) {
                        struct page *page = mem_map + MAP_NR(ret);

                        memset(ret, 0, PAGE_SIZE);
                        (unsigned long)page->pprev_hash = 2;
                        (unsigned long *)page->next_hash = pgd_quicklist;
                        pgd_quicklist = (unsigned long *)page;
                        pgd_cache_size++;
                }
        }
        return (pgd_t *)ret;
}

#else /* __SMP__ */

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size++;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
        unsigned long *ret;

        if((ret = pgd_quicklist) != NULL) {
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        } else {
                ret = (unsigned long *) __get_free_page(GFP_KERNEL);
                if(ret)
                        memset(ret, 0, PAGE_SIZE);
        }
        return (pgd_t *)ret;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

#endif /* __SMP__ */

extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_premasked);

extern __inline__ pmd_t *get_pmd_fast(void)
{
        unsigned long *ret;

        if((ret = (unsigned long *)pte_quicklist) != NULL) {
                pte_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        return (pmd_t *)ret;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
        *(unsigned long *)pmd = (unsigned long) pte_quicklist;
        pte_quicklist = (unsigned long *) pmd;
        pgtable_cache_size++;
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
        free_page((unsigned long)pmd);
}

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);

extern __inline__ pte_t *get_pte_fast(void)
{
        unsigned long *ret;

        if((ret = (unsigned long *)pte_quicklist) != NULL) {
                pte_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
        *(unsigned long *)pte = (unsigned long) pte_quicklist;
        pte_quicklist = (unsigned long *) pte;
        pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
        free_page((unsigned long)pte);
}

#define pte_free_kernel(pte)    free_pte_fast(pte)
#define pte_free(pte)           free_pte_fast(pte)
#define pmd_free_kernel(pmd)    free_pmd_fast(pmd)
#define pmd_free(pmd)           free_pmd_fast(pmd)
#define pgd_free(pgd)           free_pgd_fast(pgd)
#define pgd_alloc()             get_pgd_fast()

extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = get_pte_fast();

                if (!page)
                        return get_pte_slow(pmd, address);
                pmd_set(pmd, page);
                return page + address;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (REAL_PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = get_pmd_fast();

                if (!page)
                        return get_pmd_slow(pgd, address);
                pgd_set(pgd, page);
                return page + address;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

#define pte_alloc_kernel(pmd, addr)     pte_alloc(pmd, addr)
#define pmd_alloc_kernel(pgd, addr)     pmd_alloc(pgd, addr)
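
/* A fault-time allocation walk therefore looks roughly like this
 * (illustrative only, error handling omitted):
 *
 *      pgd_t *pgd = pgd_offset(mm, address);
 *      pmd_t *pmd = pmd_alloc(pgd, address);
 *      pte_t *pte = pmd ? pte_alloc(pmd, address) : NULL;
 *
 * with both allocators trying their quicklists via get_*_fast() before
 * falling back to get_*_slow().
 */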

extern int do_check_pgt_cache(int, int);

/* Nothing to do on sparc64 :) */
#define set_pgdir(address, entry)       do { } while(0)

extern pgd_t swapper_pg_dir[1];

extern inline void SET_PAGE_DIR(struct task_struct *tsk, pgd_t *pgdir)
{
        if(pgdir != swapper_pg_dir && tsk->mm == current->mm) {
                register unsigned long paddr asm("o5");

                paddr = __pa(pgdir);
                __asm__ __volatile__ ("
                rdpr            %%pstate, %%o4
                wrpr            %%o4, %1, %%pstate
                mov             %3, %%g4
                mov             %0, %%g7
                stxa            %%g0, [%%g4] %2
                wrpr            %%o4, 0x0, %%pstate
                " : /* No outputs */
                  : "r" (paddr), "i" (PSTATE_MG|PSTATE_IE),
                    "i" (ASI_DMMU), "i" (TSB_REG)
                  : "o4");
                flush_tlb_mm(current->mm);
        }
}

/* Routines for getting a dvma scsi buffer. */
struct mmu_sglist {
        char *addr;
        char *__dont_touch;
        unsigned int len;
        __u32 dvma_addr;
};

extern __u32 mmu_get_scsi_one(char *, unsigned long, struct linux_sbus *sbus);
extern void  mmu_get_scsi_sgl(struct mmu_sglist *, int, struct linux_sbus *sbus);

extern void mmu_release_scsi_one(u32 vaddr, unsigned long len,
                                 struct linux_sbus *sbus);
extern void mmu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus);

#define NEED_DMA_SYNCHRONIZATION
#define mmu_sync_dma(dma_addr, len, sbus_instance) \
        mmu_release_scsi_one((dma_addr), (len), (sbus_instance))

/* These do nothing with the way I have things set up. */
#define mmu_lockarea(vaddr, len)        (vaddr)
#define mmu_unlockarea(vaddr, len)      do { } while(0)

/* There used to be some funny code here which tried to guess which
 * TLB wanted the mapping; that wasn't accurate enough to justify its
 * existence.  The real way to do that is to have each TLB miss handler
 * pass in a distinct code to do_sparc64_fault() and do it more accurately
 * there.
 *
 * What we do need to handle here is preventing I-cache corruption.  The
 * deal is that the I-cache snoops stores from other CPUs and all DMA
 * activity, however stores from the local processor are not snooped.
 * The dynamic linker and our signal handler mechanism take care of
 * the cases where they write into instruction space, but the case where
 * a page is copied in the kernel and then executed in user-space is not
 * handled right.  This leads to corruption if things are "just right";
 * consider the following scenario:
 *
 * 1) Process 1 frees up a page that was used for the PLT of libc in
 *    its address space.
 * 2) Process 2 writes into a page in the PLT of libc for the first
 *    time.  do_wp_page() copies the page locally, and the local I-cache
 *    of the processor does not notice the writes during the page copy.
 *    The new page used just so happens to be the one just freed in #1.
 * 3) After the PLT write, later the CPU calls into an unresolved PLT
 *    entry, and the CPU executes old instructions from process 1's PLT
 *    table.
 * 4) Splat.
 */
extern void flush_icache_page(unsigned long phys_page);
#define update_mmu_cache(__vma, __address, _pte) \
do { \
        unsigned short __flags = ((__vma)->vm_flags); \
        if ((__flags & VM_EXEC) != 0 && \
            ((pte_val(_pte) & (_PAGE_PRESENT | _PAGE_WRITE | _PAGE_MODIFIED)) == \
             (_PAGE_PRESENT | _PAGE_WRITE | _PAGE_MODIFIED))) { \
                flush_icache_page(pte_page(_pte) - page_offset); \
        } \
} while(0)

/* Make a non-present pseudo-TTE. */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type<<PAGE_SHIFT)|(offset<<(PAGE_SHIFT+8)); return pte; }

extern inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
{
        pte_t pte;
        pte_val(pte) = ((page) | pgprot_val(prot) | _PAGE_E) & ~(unsigned long)_PAGE_CACHE;
        pte_val(pte) |= (((unsigned long)space) << 32);
        return pte;
}

#define SWP_TYPE(entry)         (((entry>>PAGE_SHIFT) & 0xff))
#define SWP_OFFSET(entry)       ((entry) >> (PAGE_SHIFT+8))
#define SWP_ENTRY(type,offset)  pte_val(mk_swap_pte((type),(offset)))
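
/* So a swap pseudo-TTE keeps the swap type in the eight bits just above
 * PAGE_SHIFT and the swap offset above that; everything below PAGE_SHIFT
 * stays zero, so in particular _PAGE_PRESENT is clear and pte_present()
 * is false for it.
 */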

extern __inline__ unsigned long
sun4u_get_pte (unsigned long addr)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;

        if (addr >= PAGE_OFFSET)
                return addr & _PAGE_PADDR;
        pgdp = pgd_offset_k (addr);
        pmdp = pmd_offset (pgdp, addr);
        ptep = pte_offset (pmdp, addr);
        return pte_val (*ptep) & _PAGE_PADDR;
}

extern __inline__ unsigned long
__get_phys (unsigned long addr)
{
        return sun4u_get_pte (addr);
}

extern __inline__ int
__get_iospace (unsigned long addr)
{
        return ((sun4u_get_pte (addr) & 0xf0000000) >> 28);
}

extern void * module_map (unsigned long size);
extern void module_unmap (void *addr);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)          (test_bit(PG_skip, &(page)->flags))

extern int io_remap_page_range(unsigned long from, unsigned long offset,
                               unsigned long size, pgprot_t prot, int space);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */