#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

#include <linux/config.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
#endif

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/* Caches aren't brain-dead on the intel. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(mm, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)

#define __flush_tlb() \
	do { \
		unsigned int tmpreg; \
		__asm__ __volatile__( \
			"movl %%cr3, %0; # flush TLB \n" \
			"movl %0, %%cr3; \n" \
			: "=r" (tmpreg) \
			:: "memory"); \
	} while (0)

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
#define __flush_tlb_global() \
	do { \
		unsigned int tmpreg; \
		__asm__ __volatile__( \
			"movl %1, %%cr4; # turn off PGE \n" \
			"movl %%cr3, %0; # flush TLB \n" \
			"movl %0, %%cr3; \n" \
			"movl %2, %%cr4; # turn PGE back on \n" \
			: "=&r" (tmpreg) \
			: "r" (mmu_cr4_features & ~X86_CR4_PGE), \
			  "r" (mmu_cr4_features) \
			: "memory"); \
	} while (0)

extern unsigned long pgkern_mask;

/*
 * Do not check the PGE bit unnecessarily if this is a PPro+ kernel.
 */
#ifdef CONFIG_X86_PGE
# define __flush_tlb_all() __flush_tlb_global()
#else
# define __flush_tlb_all() \
	do { \
		if (cpu_has_pge) \
			__flush_tlb_global(); \
		else \
			__flush_tlb(); \
	} while (0)
#endif

#ifndef CONFIG_X86_INVLPG
#define __flush_tlb_one(addr) __flush_tlb()
#else
#define __flush_tlb_one(addr) \
	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
#endif
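
/*
 * Example usage (sketch): after changing a single kernel mapping it is
 * enough to invalidate that one translation with __flush_tlb_one();
 * wholesale page-table changes are better served by __flush_tlb_all().
 * The names ptep, phys and vaddr below are purely illustrative:
 *
 *	set_pte(ptep, mk_pte_phys(phys, PAGE_KERNEL));
 *	__flush_tlb_one(vaddr);
 */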

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifndef __ASSEMBLY__
#if CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
#endif

#define __beep() asm("movb $0x3,%al; outb %al,$0x61")

#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR 0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT 22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
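
/*
 * Worked example (assuming the usual PAGE_OFFSET of 0xC0000000 and a
 * two-level setup where PGDIR_SHIFT is 22): PGDIR_SIZE is 4MB, so
 * USER_PTRS_PER_PGD = 3GB / 4MB = 768 and KERNEL_PGD_PTRS = 1024 - 768
 * = 256.  The BOOT_* variants hard-code the two-level shift because the
 * initial boot page tables are built before PAE can be enabled.
 */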

#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
						~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END (FIXADDR_START)
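
/*
 * Worked example (hypothetical 128MB box, PAGE_OFFSET 0xC0000000):
 * high_memory ends up at 0xC8000000, so VMALLOC_START becomes
 * (0xC8000000 + 0x00FFFFFF) & ~0x007FFFFF = 0xC8800000, leaving the
 * intended 8MB guard hole between the direct mapping and vmalloc space.
 */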

/*
 * The 4MB page bit is guesswork: it is detailed in the infamous
 * "Chapter H" of the Pentium documentation, but assuming intel did the
 * straightforward thing, setting this bit in a page directory entry
 * just means that the entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_BIT_PRESENT 0
#define _PAGE_BIT_RW 1
#define _PAGE_BIT_USER 2
#define _PAGE_BIT_PWT 3
#define _PAGE_BIT_PCD 4
#define _PAGE_BIT_ACCESSED 5
#define _PAGE_BIT_DIRTY 6
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */

#define _PAGE_PRESENT 0x001
#define _PAGE_RW 0x002
#define _PAGE_USER 0x004
#define _PAGE_PWT 0x008
#define _PAGE_PCD 0x010
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY 0x040
#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */

#define _PAGE_PROTNONE 0x080 /* If not present */

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
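
/*
 * Example: combining the values above, _PAGE_TABLE works out to 0x067
 * (present | rw | user | accessed | dirty) and _KERNPG_TABLE to 0x063,
 * the protections given to page directory entries that point at user
 * and kernel page tables respectively.
 */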

#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#define __PAGE_KERNEL \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL_NOCACHE \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
#define __PAGE_KERNEL_RO \
	(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)

#ifdef CONFIG_X86_PGE
# define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
#else
# define MAKE_GLOBAL(x) \
	({ \
		pgprot_t __ret; \
		if (cpu_has_pge) \
			__ret = __pgprot((x) | _PAGE_GLOBAL); \
		else \
			__ret = __pgprot(x); \
		__ret; \
	})
#endif

#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)

/*
 * The i386 can't do page protection for execute, and treats it the
 * same as read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
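
/*
 * Example: these tables are indexed by a vma's read/write/exec (and
 * shared) bits.  A private PROT_READ|PROT_WRITE mapping resolves to
 * __P011, i.e. PAGE_COPY, so the pte starts out read-only and the first
 * write faults into the copy-on-write path; the same mapping created
 * with MAP_SHARED resolves to __S011 == PAGE_SHARED and is writable
 * directly.
 */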

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd(pmd_t * pmd);
extern void __handle_bad_pmd_kernel(pmd_t * pmd);

#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)

#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

/*
 * Permanent address of a page. Obviously must never be
 * called on a highmem page.
 */
#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_exec(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }

static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }

static inline int ptep_test_and_clear_dirty(pte_t *ptep) { return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep); }
static inline int ptep_test_and_clear_young(pte_t *ptep) { return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); }
static inline void ptep_set_wrprotect(pte_t *ptep) { clear_bit(_PAGE_BIT_RW, ptep); }
static inline void ptep_mkdirty(pte_t *ptep) { set_bit(_PAGE_BIT_DIRTY, ptep); }
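
/*
 * Example usage (sketch): the value-based helpers above are typically
 * chained when constructing a new pte, while the ptep_* variants act
 * atomically on a live page-table entry.  The names page, vma and ptep
 * below are purely illustrative; ptep_test_and_clear_young() reports
 * whether the page was referenced and clears the accessed bit in one go:
 *
 *	entry = pte_mkyoung(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 *	referenced = ptep_test_and_clear_young(ptep);
 */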

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot) __mk_pte((page) - mem_map, (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) __mk_pte((physpage) >> PAGE_SHIFT, pgprot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_low |= pgprot_val(newprot);
	return pte;
}
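
/*
 * Example (sketch): mprotect()-style code uses pte_modify() to change
 * the protection bits of an existing pte while keeping its physical
 * address and accessed/dirty state, with newprot typically taken from
 * the __P/__S tables above:
 *
 *	entry = pte_modify(entry, newprot);
 *	set_pte(ptep, entry);
 */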

#define page_pte(page) page_pte_prot(page, __pgprot(0))

#define pmd_page(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/* to find an entry in a page-table-directory. */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

#define __pgd_offset(address) pgd_index(address)

#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define __pmd_offset(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
	__pte_offset(address))
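
/*
 * Example (sketch): together these form the classic software page-table
 * walk; pmd_offset() comes from the 2-level/3-level header included
 * above, and error checking is omitted:
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_offset(pgd, address);
 *	pte_t *pte = pte_offset(pmd, address);
 *
 * In the folded two-level case pmd_offset() simply returns its pgd
 * argument reinterpreted as a pmd.
 */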

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)

/* Encode and de-code a swap entry */
#define SWP_TYPE(x) (((x).val >> 1) & 0x3f)
#define SWP_OFFSET(x) ((x).val >> 8)
#define SWP_ENTRY(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define swp_entry_to_pte(x) ((pte_t) { (x).val })
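
/*
 * Example: SWP_ENTRY(2, 100) builds the value (2 << 1) | (100 << 8);
 * SWP_TYPE() and SWP_OFFSET() recover the 2 and the 100 again.  Bit 0
 * stays clear so a swapped-out pte never looks present to the hardware,
 * and the 6-bit type field bounds how many swap areas can be encoded.
 */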

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)

#define io_remap_page_range remap_page_range

#endif /* _I386_PGTABLE_H */