#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

#include <linux/config.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
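/*
 * What the "fold" means in practice (a sketch, not code from this
 * header): generic three-level code such as
 *
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *
 * still compiles unchanged, because in the two-level configuration
 * pmd_offset() simply hands the pgd entry through as the pmd, so only
 * the two physical levels are ever walked.
 */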
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/* Caches aren't brain-dead on the intel. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(mm, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)		do { } while (0)
#define flush_icache_range(start, end)	do { } while (0)
#define flush_icache_page(vma, pg)		do { } while (0)

#define __flush_tlb() \
	do { \
		unsigned int tmpreg; \
		__asm__ __volatile__( \
			"movl %%cr3, %0;  # flush TLB \n" \
			"movl %0, %%cr3;              \n" \
			: "=r" (tmpreg) \
			:: "memory"); \
	} while (0)
/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
#define __flush_tlb_global() \
	do { \
		unsigned int tmpreg; \
		__asm__ __volatile__( \
			"movl %1, %%cr4;  # turn off PGE     \n" \
			"movl %%cr3, %0;  # flush TLB        \n" \
			"movl %0, %%cr3;                     \n" \
			"movl %2, %%cr4;  # turn PGE back on \n" \
			: "=&r" (tmpreg) \
			: "r" (mmu_cr4_features & ~X86_CR4_PGE), \
			  "r" (mmu_cr4_features) \
			: "memory"); \
	} while (0)
extern unsigned long pgkern_mask;

/*
 * Do not check the PGE bit unnecessarily if this is a PPro+ kernel.
 */
#ifdef CONFIG_X86_PGE
# define __flush_tlb_all() __flush_tlb_global()
#else
# define __flush_tlb_all() \
	do { \
		if (cpu_has_pge) \
			__flush_tlb_global(); \
		else \
			__flush_tlb(); \
	} while (0)
#endif

#ifndef CONFIG_X86_INVLPG
#define __flush_tlb_one(addr) __flush_tlb()
#else
#define __flush_tlb_one(addr) \
	__asm__ __volatile__("invlpg %0" : : "m" (*(char *) addr))
#endif
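/*
 * Illustrative use of __flush_tlb_one() (a sketch, not code from this
 * tree): after changing a single kernel mapping in place, only that
 * one TLB entry needs to go.
 *
 *	set_pte(ptep, mk_pte_phys(physaddr, PAGE_KERNEL));
 *	__flush_tlb_one(vaddr);
 *
 * With CONFIG_X86_INVLPG (i486+) this is a single "invlpg"; older CPUs
 * fall back to a full CR3 reload.
 */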
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
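/*
 * Typical (sketched) use: a read fault on anonymous memory can be
 * backed by the shared zero page instead of a freshly allocated one,
 * mapped read-only so that a later write still faults:
 *
 *	entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
 *	set_pte(ptep, entry);
 */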
#endif /* !__ASSEMBLY__ */

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifndef __ASSEMBLY__
#if CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
#endif
#define __beep() asm("movb $0x3,%al; outb %al,$0x61")

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
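/*
 * Worked example with the usual __PAGE_OFFSET of 0xC0000000: the boot
 * page directory gets 0xC0000000 >> 22 = 768 user entries and
 * 1024 - 768 = 256 kernel entries, i.e. the familiar 3GB/1GB split.
 */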
#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
				~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	(FIXADDR_START)
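/*
 * Worked example (illustrative numbers): with 128MB of RAM, high_memory
 * is 0xC8000000, so VMALLOC_START = (0xC8000000 + 0xFFFFFF) & ~0x7FFFFF
 * = 0xC8800000, leaving exactly the intended 8MB guard hole between the
 * end of mapped physical memory and the first vmalloc mapping.
 */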
/*
 * The 4MB page is a guess.. Detailed in the infamous "Chapter H"
 * of the Pentium details, but assuming intel did the straightforward
 * thing, this bit set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_PWT	0x008
#define _PAGE_PCD	0x010
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040
#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */

#define _PAGE_PROTNONE	0x080	/* If not present */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#define __PAGE_KERNEL \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL_NOCACHE \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
#define __PAGE_KERNEL_RO \
	(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)

#ifdef CONFIG_X86_PGE
# define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
#else
# define MAKE_GLOBAL(x) \
	({ \
		pgprot_t __ret; \
		\
		if (cpu_has_pge) \
			__ret = __pgprot((x) | _PAGE_GLOBAL); \
		else \
			__ret = __pgprot(x); \
		__ret; \
	})
#endif

#define PAGE_KERNEL		MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
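/*
 * For example, on a CONFIG_X86_PGE build PAGE_KERNEL evaluates directly
 * to __pgprot(__PAGE_KERNEL | _PAGE_GLOBAL); otherwise _PAGE_GLOBAL is
 * added only after the run-time cpu_has_pge check above.
 */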
/*
 * The i386 can't do page protection for execute, and treats that
 * the same as read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
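/*
 * Reading the tables above: the three digits are the exec/write/read
 * bits of the mmap protection. A private writable mapping (__P011) gets
 * PAGE_COPY, i.e. it starts out read-only so the first write faults and
 * can be handled as copy-on-write, while the same mapping shared
 * (__S011) gets PAGE_SHARED and is writable immediately.
 */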
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd(pmd_t * pmd);
extern void __handle_bad_pmd_kernel(pmd_t * pmd);

#define pte_none(x)	(!pte_val(x))
#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp)	do { set_pte(xp, __pte(0)); } while (0)
#define pte_pagenr(x)	((unsigned long)((pte_val(x) >> PAGE_SHIFT)))

#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
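/*
 * In other words: a pmd entry is "bad" if, once the page-frame address
 * and the USER bit are masked off, its flags are not exactly those of
 * _KERNPG_TABLE (present, writable, accessed, dirty); such an entry was
 * never set up by the page table code and ends up in
 * __handle_bad_pmd().
 */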
/*
 * Permanent address of a page. Obviously must never be
 * called on a highmem page.
 */
#define page_address(page) ({ if (!(page)->virtual) BUG(); (page)->virtual; })
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#define pte_page(x) (mem_map+pte_pagenr(x))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }

extern inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
extern inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
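/*
 * These helpers operate on a pte value and compose freely. A write
 * fault handler might do something like (a sketch, not code from this
 * tree):
 *
 *	entry = pte_mkyoung(pte_mkdirty(pte_mkwrite(entry)));
 *	set_pte(ptep, entry);
 *	__flush_tlb_one(address);
 */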
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) \
	({ \
		pte_t __pte; \
		\
		set_pte(&__pte, __pte(((page)-mem_map) * \
			(unsigned long long)PAGE_SIZE + pgprot_val(pgprot))); \
		__pte; \
	})

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
	({ pte_t __pte; set_pte(&__pte, __pte(physpage + pgprot_val(pgprot))); __pte; })

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
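/*
 * Sketched use of pte_modify(), roughly what mprotect() needs:
 * _PAGE_CHG_MASK preserves the page frame number and the accessed and
 * dirty bits, while every other protection bit is replaced:
 *
 *	entry = pte_modify(*ptep, newprot);
 *	set_pte(ptep, entry);
 */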
#define page_pte(page) page_pte_prot(page, __pgprot(0))

#define pmd_page(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/* to find an entry in a page-table-directory. */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

#define __pgd_offset(address) pgd_index(address)

#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define __pmd_offset(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
	__pte_offset(address))
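/*
 * Putting the lookup macros together, a full (illustrative) descent
 * from an mm and a virtual address to the pte:
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_offset(pgd, address);
 *	pte_t *pte = pte_offset(pmd, address);
 *
 * with pgd_none()/pgd_bad() and pmd_none()/pmd_bad() checked before
 * dereferencing each level.
 */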
/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

/* Encode and de-code a swap entry */
#define SWP_TYPE(x)			(((x).val >> 1) & 0x3f)
#define SWP_OFFSET(x)			((x).val >> 8)
#define SWP_ENTRY(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)		((pte_t) { (x).val })
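/*
 * Worked example of the encoding above: SWP_ENTRY(3, 0x100) gives
 * (3 << 1) | (0x100 << 8) = 0x10006. Bit 0 (_PAGE_PRESENT) stays clear
 * so the mmu faults on access, SWP_TYPE() recovers 3 from bits 1-6, and
 * SWP_OFFSET() recovers 0x100 from bits 8 and up.
 */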
#define module_map	vmalloc
#define module_unmap	vfree

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range remap_page_range

#endif /* _I386_PGTABLE_H */