[linux-2.6/mini2440.git] / include/asm-powerpc/page.h
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifdef __KERNEL__
#include <asm/asm-compat.h>
#include <asm/kdump.h>

/*
 * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#ifdef CONFIG_PPC_64K_PAGES
#define PAGE_SHIFT		16
#else
#define PAGE_SHIFT		12
#endif

#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)

/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA		1

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
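
/*
 * Illustrative example (not part of the original header), assuming
 * PAGE_SHIFT == 12, of the sign-extension behaviour described above:
 *
 *	(1 << PAGE_SHIFT) - 1			== 0x00000fff	(an int)
 *	PAGE_MASK				== 0xfffff000	(still an int, i.e. -4096)
 *	(unsigned long long)PAGE_MASK		== 0xfffffffffffff000	(sign-extended)
 *	(unsigned long long)0xfffff000u		== 0x00000000fffff000	(zero-extension,
 *								 which is not what we want)
 */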
/*
 * KERNELBASE is the virtual address of the start of the kernel, it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * To get a physical address from a virtual one you subtract PAGE_OFFSET,
 * _not_ KERNELBASE.
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */

#define PAGE_OFFSET	ASM_CONST(CONFIG_KERNEL_START)
#define KERNELBASE	(PAGE_OFFSET + PHYSICAL_START)

#ifdef CONFIG_DISCONTIGMEM
#define page_to_pfn(page)	discontigmem_page_to_pfn(page)
#define pfn_to_page(pfn)	discontigmem_pfn_to_page(pfn)
#define pfn_valid(pfn)		discontigmem_pfn_valid(pfn)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
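
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * helper showing the linear-mapping round trip implied by __pa()/__va().
 * __example_lowmem_roundtrip() is not a real kernel function.
 */
#ifndef __ASSEMBLY__
static inline int __example_lowmem_roundtrip(void *kaddr)
{
	unsigned long phys = __pa(kaddr);	/* virtual -> physical */

	return __va(phys) == kaddr;		/* physical -> virtual again */
}
#endif /* __ASSEMBLY__ */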
/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable. This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
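
/*
 * Worked example (illustrative, not in the original header), with 4K pages
 * (PAGE_SIZE == 0x1000):
 *
 *	_ALIGN_UP(0x1234, 0x1000)	== 0x2000
 *	_ALIGN_DOWN(0x1234, 0x1000)	== 0x1000
 *	PAGE_ALIGN(0x2000)		== 0x2000	(already aligned, unchanged)
 */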
/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness", use is_kernel_addr() - it should do what you want.
 */
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
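
/*
 * Illustrative sketch (not part of the original header): test addresses
 * with is_kernel_addr() rather than comparing against KERNELBASE or
 * PAGE_OFFSET directly. __example_is_user_addr() is hypothetical.
 */
#ifndef __ASSEMBLY__
static inline int __example_is_user_addr(unsigned long addr)
{
	return !is_kernel_addr(addr);	/* user addresses sit below PAGE_OFFSET */
}
#endif /* __ASSEMBLY__ */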
#ifndef __ASSEMBLY__

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking. */

/* PTE level */
typedef struct { pte_basic_t pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

/* 64k pages additionally define a bigger "real PTE" type that gathers
 * the "second half" part of the PTE for pseudo 64k pages
 */
#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif

/* PMD level */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

/* PUD level exists only on 4k pages */
#ifndef CONFIG_PPC_64K_PAGES
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })
#endif

/* PGD level */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) })

/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })
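
/*
 * Illustrative example (not part of the original header): with the struct
 * wrappers above, mixing up page-table levels is caught at compile time,
 * whereas the plain integer typedefs in the #else branch below accept it:
 *
 *	pte_t pte = __pte(0);
 *	pmd_t pmd = pte;			<-- compile error with typechecks
 *	unsigned long raw = pte_val(pte);	<-- explicit unwrap is fine
 */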

#else

/*
 * .. while these make it easier on the compiler
 */

typedef pte_basic_t pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)

#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef unsigned long real_pte_t;
#endif

typedef unsigned long pmd_t;
#define pmd_val(x)	(x)
#define __pmd(x)	(x)

#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_64K_PAGES)
typedef unsigned long pud_t;
#define pud_val(x)	(x)
#define __pud(x)	(x)
#endif

typedef unsigned long pgd_t;
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

typedef unsigned long pgprot_t;
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int page_is_ram(unsigned long pfn);

struct vm_area_struct;
extern const char *arch_vma_name(struct vm_area_struct *vma);

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_PAGE_H */