/* $Id: pgtable.h,v 1.19 1998/10/26 19:59:39 davem Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1998 by Ralf Baechle at alii
 */
#ifndef __ASM_MIPS_PGTABLE_H
#define __ASM_MIPS_PGTABLE_H

#include <asm/addrspace.h>
#include <asm/mipsconfig.h>

#ifndef _LANGUAGE_ASSEMBLY

#include <linux/linkage.h>
#include <asm/cachectl.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_page(vma, vmaddr) flushes a single page
 *  - flush_cache_range(mm, start, end) flushes a range of pages
 *  - flush_page_to_ram(page) writes a kernel page back to ram
 */
extern void (*flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
extern void (*flush_cache_range)(struct mm_struct *mm, unsigned long start,
				 unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page);
extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_page_to_ram)(unsigned long page);
#define flush_icache_range(start, end) flush_cache_all()
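
/* Illustrative sketch (editorial, not part of the original header),
 * assuming a hypothetical caller that patches instructions at 'addr':
 * after writing code through the data cache, the stale instruction
 * cache must be invalidated before execution.  Note that on this port
 * flush_icache_range() simply flushes all caches.
 */
#if 0	/* example only */
	memcpy((void *)addr, insns, len);	/* write new instructions */
	flush_icache_range(addr, addr + len);	/* here: flush_cache_all() */
#endif
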
/* TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes a single page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 */
extern void (*flush_tlb_all)(void);
extern void (*flush_tlb_mm)(struct mm_struct *mm);
extern void (*flush_tlb_range)(struct mm_struct *mm, unsigned long start,
			       unsigned long end);
extern void (*flush_tlb_page)(struct vm_area_struct *vma, unsigned long page);
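
/* Illustrative sketch (editorial, not in the original header): after
 * changing the protection of a range, the stale translations must be
 * dropped so the hardware cannot keep using them.  'vma', 'start' and
 * 'end' are assumed to come from an mprotect()-style path:
 */
#if 0	/* example only */
	flush_cache_range(vma->vm_mm, start, end);	/* caches first... */
	/* ...page table updates happen here... */
	flush_tlb_range(vma->vm_mm, start, end);	/* ...then the TLB */
#endif
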
/*
 * - add_wired_entry() adds a fixed TLB entry and bumps the wired
 *   register so the entry is never replaced by a TLB refill.
 */
extern void (*add_wired_entry)(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask);

/* Basically we have the same two-level page tables as the i386 (the
 * logical three-level Linux page table layout, folded).  Some day, when
 * we have proper page coloring support, we can have a 1% quicker tlb
 * refill handling mechanism; for now it is a bit slower, but it works
 * even with the cache aliasing problem the R4k and above have.
 */

#endif /* !defined (_LANGUAGE_ASSEMBLY) */

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Entries per page directory level: we use a two-level layout, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
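
/* Worked numbers (editorial note): with 4kB pages, one pte page maps
 * PTRS_PER_PTE * 4kB = 1024 * 4kB = 4MB, which is exactly
 * PGDIR_SIZE = 1UL << 22, and the PTRS_PER_PGD = 1024 pgd entries
 * together cover the full 4GB of a 32-bit address space.
 */
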
#define VMALLOC_START	KSEG2
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	KSEG3

/* Note that we shift the lower 32 bits of each EntryLo[01] entry
 * 6 bits to the left.  That way we can convert the PFN into the
 * physical address by a single 'and' operation and gain 6 additional
 * bits for storing information which isn't present in a normal
 * MIPS page table entry.
 *
 * Similar to the Alpha port, we need to keep track of the ref
 * and mod bits in software.  We have a software "yeah you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page.  By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page; this gives us a mod bit via the hardware
 * dirty bit.
 *
 * Certain revisions of the R4000 and R5000 have a bug where if a
 * certain sequence occurs in the last 3 instructions of an executable
 * page, and the following page is not mapped, the cpu can do
 * unpredictable things.  The code (when it is written) to deal with
 * this problem will be in the update_mmu_cache() code for the r4k.
 */
#define _PAGE_PRESENT               (1<<0)  /* implemented in software */
#define _PAGE_READ                  (1<<1)  /* implemented in software */
#define _PAGE_WRITE                 (1<<2)  /* implemented in software */
#define _PAGE_ACCESSED              (1<<3)  /* implemented in software */
#define _PAGE_MODIFIED              (1<<4)  /* implemented in software */
#define _PAGE_R4KBUG                (1<<5)  /* workaround for r4k bug  */
#define _PAGE_GLOBAL                (1<<6)
#define _PAGE_VALID                 (1<<7)
#define _PAGE_SILENT_READ           (1<<7)  /* synonym                 */
#define _PAGE_DIRTY                 (1<<8)  /* The MIPS dirty bit      */
#define _PAGE_SILENT_WRITE          (1<<8)
#define _CACHE_CACHABLE_NO_WA       (0<<9)  /* R4600 only              */
#define _CACHE_CACHABLE_WA          (1<<9)  /* R4600 only              */
#define _CACHE_UNCACHED             (2<<9)  /* R4[0246]00              */
#define _CACHE_CACHABLE_NONCOHERENT (3<<9)  /* R4[0246]00              */
#define _CACHE_CACHABLE_CE          (4<<9)  /* R4[04]00 only           */
#define _CACHE_CACHABLE_COW         (5<<9)  /* R4[04]00 only           */
#define _CACHE_CACHABLE_CUW         (6<<9)  /* R4[04]00 only           */
#define _CACHE_CACHABLE_ACCELERATED (7<<9)  /* R10000 only             */
#define _CACHE_MASK                 (7<<9)

#define __READABLE	(_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
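
/* Editorial sketch (not in the original header) of the software
 * ref/mod tracking described above: a writable but clean pte has
 * _PAGE_WRITE set and _PAGE_SILENT_WRITE clear, so the hardware traps
 * the first store; the fault handler can then set both mod bits, and
 * later stores proceed silently:
 */
#if 0	/* example only; the pte helpers are defined further down this file */
	if (pte_write(pte))			/* software says writable */
		set_pte(ptep, pte_mkdirty(pte));/* sets _PAGE_MODIFIED and,
						   since _PAGE_WRITE is set,
						   _PAGE_SILENT_WRITE too */
#endif
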
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			_CACHE_CACHABLE_NONCOHERENT)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
			_CACHE_CACHABLE_NONCOHERENT)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
			_CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
			_CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			_CACHE_UNCACHED)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
			_CACHE_UNCACHED)

/*
 * MIPS can't do page protection for execute, and considers that the same
 * as read.  Also, write permissions imply read permissions.  This is the
 * closest we can get by reasonable means.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
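
/* Editorial note: generic code indexes these entries through
 * protection_map[], using bit 3 for shared mappings and bits 0-2 for
 * read/write/execute.  E.g. a private writable mapping (__P011) gets
 * PAGE_COPY, so the first write faults and triggers copy-on-write,
 * while the shared equivalent (__S011) gets PAGE_SHARED.
 */
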
#if !defined (_LANGUAGE_ASSEMBLY)

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t *__bad_pagetable(void);

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(__vaddr) \
	(empty_zero_page + (((unsigned long)(__vaddr)) & zero_page_mask))
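
/* Editorial note: zero_page_mask exists because of virtual cache
 * aliasing; several copies of the zero page sit side by side, and the
 * faulting virtual address selects the copy whose cache color matches,
 * e.g. in an anonymous read fault:
 *
 *	pte = mk_pte(ZERO_PAGE(address), vma->vm_page_prot);
 */
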
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/*
 * sizeof(void*) == (1 << SIZEOF_PTR_LOG2)
 */
#define SIZEOF_PTR_LOG2	2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
	((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern void (*load_pgd)(unsigned long pg_dir);

/* to set the page-dir */
#define SET_PAGE_DIR(tsk,pgdir) ((tsk)->tss.pg_dir = ((unsigned long) (pgdir)))

extern pmd_t invalid_pte_table[PAGE_SIZE/sizeof(pmd_t)];

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline unsigned long pte_page(pte_t pte)
{
	return PAGE_OFFSET + (pte_val(pte) & PAGE_MASK);
}

extern inline unsigned long pmd_page(pmd_t pmd)
{
	return pmd_val(pmd);
}

extern inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK);
}

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
extern inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

extern inline void pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
extern inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

extern inline int pmd_bad(pmd_t pmd)
{
	return ((pmd_page(pmd) > (unsigned long) high_memory) ||
	        (pmd_page(pmd) < PAGE_OFFSET));
}

extern inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

extern inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

274 * The "pgd_xxx()" functions here are trivial for a folded two-level
275 * setup: the pgd is never bad, and a pmd always exists (as it's folded
276 * into the pgd entry)
278 extern inline int pgd_none(pgd_t pgd
) { return 0; }
279 extern inline int pgd_bad(pgd_t pgd
) { return 0; }
280 extern inline int pgd_present(pgd_t pgd
) { return 1; }
281 extern inline void pgd_clear(pgd_t
*pgdp
) { }
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)	{ return pte_val(pte) & _PAGE_READ; }
extern inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
extern inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
extern inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

extern inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ);
	return pte;
}

extern inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
	return pte;
}

extern inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
	return pte;
}

extern inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

extern inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	if (pte_val(pte) & _PAGE_ACCESSED)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

extern inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

extern inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
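
/* Editorial sketch (not in the original header): breaking COW on a
 * private page combines the helpers above.  Making the pte dirty and
 * then writable also turns on the silent (hardware) bits, so no
 * further fault is taken; 'vma', 'address' and 'ptep' are assumed to
 * come from the fault path:
 */
#if 0	/* example only */
	pte = pte_mkwrite(pte_mkdirty(pte_mkyoung(pte)));
	set_pte(ptep, pte);
	update_mmu_cache(vma, address, pte);
#endif
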
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page & PAGE_MASK) - PAGE_OFFSET) | pgprot_val(pgprot));
}

extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	return __pte(physpage | pgprot_val(pgprot));
}

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
extern inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t *pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) (pmd_page(*dir)) +
	       ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
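
/* Editorial sketch (not in the original header): a full software walk
 * from an mm and virtual address down to the pte, as a fault handler
 * would do it.  On this folded two-level layout pmd_offset() is just a
 * cast; 'mm' and 'address' are assumed inputs:
 */
#if 0	/* example only */
	pgd_t *pgd = pgd_offset(mm, address);
	pmd_t *pmd = pmd_offset(pgd, address);
	pte_t *pte;

	if (!pmd_none(*pmd) && !pmd_bad(*pmd))
		pte = pte_offset(pmd, address);
#endif
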
/*
 * Initialize a new page directory with pointers to invalid ptes.
 */
extern void (*pgd_init)(unsigned long page);

/*
 * Allocate and free page tables.  The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)

extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;

	if (ret) {
		init = pgd_offset(&init_mm, 0);
		pgd_init((unsigned long)ret);
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);

extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}
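
/* Editorial note: the quicklists above thread free pages through their
 * first word (*page == next free page), so allocation is a simple pop
 * and freeing is a push:
 */
#if 0	/* example only */
	pte_t *pte = get_pte_fast();	/* pop from the per-cpu cache */
	if (pte)
		free_pte_fast(pte);	/* push back; the cache grows until
					   do_check_pgt_cache() trims it */
#endif
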
/* We don't use the pmd cache, so these are dummy routines. */
extern __inline__ pmd_t *get_pmd_fast(void)
{
	return (pmd_t *)0;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}

extern void __bad_pte(pmd_t *pmd);
extern void __bad_pte_kernel(pmd_t *pmd);

#define pte_free_kernel(pte)	free_pte_fast(pte)
#define pte_free(pte)		free_pte_fast(pte)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc()		get_pgd_fast()

extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

	if (pmd_none(*pmd)) {
		pte_t *page = get_pte_fast();
		if (page) {
			pmd_val(*pmd) = (unsigned long)page;
			return page + address;
		}
		return get_pte_kernel_slow(pmd, address);
	}
	if (pmd_bad(*pmd)) {
		__bad_pte_kernel(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

	if (pmd_none(*pmd)) {
		pte_t *page = get_pte_fast();
		if (page) {
			pmd_val(*pmd) = (unsigned long)page;
			return page + address;
		}
		return get_pte_slow(pmd, address);
	}
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_free_kernel		pmd_free
#define pmd_alloc_kernel	pmd_alloc

extern int do_check_pgt_cache(int, int);

extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;
	pgd_t *pgd;
#ifdef __SMP__
	int i;
#endif

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm,address) = entry;
	}
	read_unlock(&tasklist_lock);
#ifndef __SMP__
	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
		pgd[address >> PGDIR_SHIFT] = entry;
#else
	/* To pgd_alloc/pgd_free, one holds master kernel lock and so does our
	   callee, so we can modify pgd caches of other CPUs as well. -jj */
	for (i = 0; i < NR_CPUS; i++)
		for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
			pgd[address >> PGDIR_SHIFT] = entry;
#endif
}

extern pgd_t swapper_pg_dir[1024];

extern void (*update_mmu_cache)(struct vm_area_struct *vma,
				unsigned long address, pte_t pte);

/*
 * Kernel with 32 bit address space
 */
#define SWP_TYPE(entry) (((entry) >> 1) & 0x3f)
#define SWP_OFFSET(entry) ((entry) >> 8)
#define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8))
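
/* Editorial note: a swap entry is a non-present pte, so bit 0
 * (_PAGE_PRESENT) stays clear; the type lives in bits 1-6 and the
 * offset from bit 8 up.  Round trip, with hypothetical values:
 *
 *	entry = SWP_ENTRY(3, 0x1234);
 *	SWP_TYPE(entry)   == 3
 *	SWP_OFFSET(entry) == 0x1234
 */
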
#define module_map	vmalloc
#define module_unmap	vfree

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

/* TLB operations. */
extern inline void tlb_probe(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbp\n\t"
		".set reorder");
}

extern inline void tlb_read(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbr\n\t"
		".set reorder");
}

extern inline void tlb_write_indexed(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbwi\n\t"
		".set reorder");
}

extern inline void tlb_write_random(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbwr\n\t"
		".set reorder");
}
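
/* Editorial sketch (not in the original header): dumping one TLB entry
 * with the CP0 accessors below.  tlb_read() fills EntryHi/EntryLo0/
 * EntryLo1 from the slot selected by the index register; 'slot' is a
 * hypothetical index, and EntryHi is saved to preserve the current ASID:
 */
#if 0	/* example only */
	unsigned long old = get_entryhi();

	set_index(slot);
	tlb_read();
	printk("hi=%08lx lo0=%08lx lo1=%08lx\n",
	       get_entryhi(), get_entrylo0(), get_entrylo1());
	set_entryhi(old);
#endif
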
/* Dealing with various CP0 mmu/cache related registers. */

/* CP0_PAGEMASK register */
extern inline unsigned long get_pagemask(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $5\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_pagemask(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %0, $5\n\t"
		".set reorder"
		: : "r" (val));
}

/* CP0_ENTRYLO0 and CP0_ENTRYLO1 registers */
extern inline unsigned long get_entrylo0(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $2\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_entrylo0(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %0, $2\n\t"
		".set reorder"
		: : "r" (val));
}

extern inline unsigned long get_entrylo1(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $3\n\t"
		".set reorder" : "=r" (val));

	return val;
}

extern inline void set_entrylo1(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %0, $3\n\t"
		".set reorder"
		: : "r" (val));
}

/* CP0_ENTRYHI register */
extern inline unsigned long get_entryhi(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $10\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_entryhi(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %0, $10\n\t"
		".set reorder"
		: : "r" (val));
}

/* CP0_INDEX register */
extern inline unsigned long get_index(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $0\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_index(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %0, $0\n\t"
		".set reorder"
		: : "r" (val));
}

/* CP0_WIRED register */
extern inline unsigned long get_wired(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $6\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_wired(unsigned long val)
{
	__asm__ __volatile__(
		"\n\t.set noreorder\n\t"
		"mtc0 %0, $6\n\t"
		".set reorder"
		: : "r" (val));
}

/* CP0_TAGLO and CP0_TAGHI registers */
extern inline unsigned long get_taglo(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $28\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_taglo(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %0, $28\n\t"
		".set reorder"
		: : "r" (val));
}

extern inline unsigned long get_taghi(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $29\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_taghi(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %0, $29\n\t"
		".set reorder"
		: : "r" (val));
}

/* CP0_CONTEXT register */
extern inline unsigned long get_context(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $4\n\t"
		".set reorder"
		: "=r" (val));
	return val;
}

extern inline void set_context(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %0, $4\n\t"
		".set reorder"
		: : "r" (val));
}

#endif /* !defined (_LANGUAGE_ASSEMBLY) */

#endif /* __ASM_MIPS_PGTABLE_H */