1 #ifndef _ASM_POWERPC_PGTABLE_H
2 #define _ASM_POWERPC_PGTABLE_H
6 #include <asm/processor.h> /* For TASK_SIZE */
10 #endif /* !__ASSEMBLY__ */
12 #if defined(CONFIG_PPC64)
13 # include <asm/pgtable-ppc64.h>
15 # include <asm/pgtable-ppc32.h>
20 * ZERO_PAGE is a global shared page that is always zero: used
21 * for zero-mapped memory areas etc..
23 extern unsigned long empty_zero_page
[];
24 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/* Top-level kernel page directory — NOTE(review): definition lives in
 * arch start-up code, not visible in this header; confirm there. */
extern pgd_t swapper_pg_dir[];

/* Set up kernel paging — presumably called once during early boot;
 * TODO confirm the call site. */
extern void paging_init(void);
/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
/* addr is unused: every address is reported valid. */
#define kern_addr_valid(addr) (1)
/* No special treatment is needed to map I/O space here:
 * io_remap_pfn_range() simply forwards to remap_pfn_range(). */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)
40 #include <asm-generic/pgtable.h>
44 * This gets called at the end of handling a page fault, when
45 * the kernel has put a new PTE into the page table for the process.
46 * We use it to ensure coherency between the i-cache and d-cache
47 * for the page which has just been mapped in.
48 * On machines which use an MMU hash table, we use this to put a
49 * corresponding HPTE into the hash table ahead of time, instead of
50 * waiting for the inevitable extra hash-table miss exception.
52 extern void update_mmu_cache(struct vm_area_struct
*, unsigned long, pte_t
);
54 #endif /* __ASSEMBLY__ */
56 #endif /* __KERNEL__ */
57 #endif /* _ASM_POWERPC_PGTABLE_H */