#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
#endif

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
struct mm_struct;
struct vm_area_struct;
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern pgd_t swapper_pg_dir[1024];
extern kmem_cache_t *pgd_cache;
extern kmem_cache_t *pmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;

void pmd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_dtor(void *, kmem_cache_t *, unsigned long);
void pgtable_cache_init(void);
void paging_init(void);
/*
 * The Linux x86 paging architecture is 'compile-time dual-mode', it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE	(1UL << PMD_SHIFT)
# define PMD_MASK	(~(PMD_SIZE-1))
#else
# include <asm/pgtable-2level-defs.h>
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
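/*
 * Worked example (illustrative only, assuming the default __PAGE_OFFSET of
 * 0xC0000000): 0xC0000000 >> 22 = 768, so the boot page directory has 768
 * user entries and 1024 - 768 = 256 kernel entries.
 */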
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + vmalloc_earlyreserve + \
			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
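/*
 * Rounding example (illustrative values only): with high_memory at
 * 0xf8000000 and vmalloc_earlyreserve of 0, the VMALLOC_START expression
 * rounds up past an 8MB guard hole to an 8MB boundary:
 *
 *	(0xf8000000 + 0 + 2*0x800000 - 1) & ~(0x800000 - 1) == 0xf8800000
 */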
/*
 * _PAGE_PSE set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_RW		1
#define _PAGE_BIT_USER		2
#define _PAGE_BIT_PWT		3
#define _PAGE_BIT_PCD		4
#define _PAGE_BIT_ACCESSED	5
#define _PAGE_BIT_DIRTY		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_NX		63

#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_PWT	0x008
#define _PAGE_PCD	0x010
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040
#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
#define _PAGE_UNUSED1	0x200	/* available for programmer */
#define _PAGE_UNUSED2	0x400
#define _PAGE_UNUSED3	0x800

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	0x080	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
#ifdef CONFIG_X86_PAE
#define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif
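/*
 * Note (explanatory, not from the original header): under PAE each pte is
 * 64 bits wide and pte_t stores it as two 32-bit halves (pte_low/pte_high),
 * so _PAGE_BIT_NX (bit 63) lives in pte_high; see pte_modify() below, which
 * manipulates it as bit (_PAGE_BIT_NX - 32) of the high word.
 */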
#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define PAGE_NONE \
	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY \
	PAGE_COPY_NOEXEC

#define PAGE_READONLY \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define _PAGE_KERNEL \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)
/*
 * The i386 can't do page protection for execute, and considers it
 * the same as read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
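/*
 * Illustration (not part of the original header): generic mm code indexes
 * these tables with the low mmap() protection bits, so e.g. a private
 * PROT_READ|PROT_WRITE mapping looks up __P011 (PAGE_COPY, for COW) while
 * a shared PROT_READ|PROT_WRITE mapping looks up __S011 (PAGE_SHARED).
 */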
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK
/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];
#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
/* To avoid harmful races, pmd_none(x) should check only the lower word when PAE */
#define pmd_none(x)	(!(unsigned long)pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
static inline int pte_huge(pte_t pte)		{ return (pte).pte_low & _PAGE_PSE; }

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }

static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PSE; return pte; }
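/*
 * Usage sketch (illustrative only, not part of the original header): the
 * modifiers above take and return a pte_t by value, so a caller has to
 * write the result back itself, e.g.:
 *
 *	pte_t entry = *ptep;
 *	entry = pte_mkyoung(pte_mkdirty(entry));
 *	set_pte_at(mm, address, ptep, entry);
 */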
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)	\
	do {								\
		if (dirty) {						\
			(ptep)->pte_low = (entry).pte_low;		\
			flush_tlb_page(vma, address);			\
		}							\
	} while (0)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte;
	if (full) {
		pte = *ptep;
		pte_clear(mm, addr, ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
}
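/*
 * Usage sketch (illustrative only): reclaim-style aging with the helper
 * above, flushing the TLB only when the accessed bit was actually set:
 *
 *	if (ptep_test_and_clear_young(vma, address, ptep))
 *		flush_tlb_page(vma, address);
 */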
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - pointer to pgd range anywhere on a pgd page
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
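/*
 * Illustrative use (a sketch, not taken from this file): copying the
 * kernel portion of the reference page directory into a newly allocated
 * pgd, the way pgd constructor code does:
 *
 *	clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 *			swapper_pg_dir + USER_PTRS_PER_PGD,
 *			KERNEL_PGD_PTRS);
 */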
/*
 * Macro to mark a page protection value as "uncacheable".  On processors
 * which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)			\
				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
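/*
 * Usage sketch (illustrative only): a driver mmap() handler mapping device
 * memory uncached before remapping it into userspace:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn,
 *			vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */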
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_low |= pgprot_val(newprot);
#ifdef CONFIG_X86_PAE
	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	pte.pte_high |= (pgprot_val(newprot) >> 32) & \
					(__supported_pte_mask >> 32);
#endif
	return pte;
}
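/*
 * Usage sketch (illustrative only): changing the protection of a present
 * pte while keeping its pfn and dirty/accessed state, as mprotect-style
 * code does:
 *
 *	pte_t entry = *ptep;
 *	entry = pte_modify(entry, PAGE_READONLY);
 *	set_pte_at(mm, address, ptep, entry);
 */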
#define pmd_large(pmd) \
((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd) \
		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
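/*
 * Walk sketch (illustrative only, error handling trimmed): resolving a
 * kernel virtual address to its pte with the helpers above, much as
 * lookup_address() below does; pud_offset() and pmd_offset() come from
 * the folded pud/pmd level headers:
 *
 *	pgd_t *pgd = pgd_offset_k(address);
 *	pud_t *pud = pud_offset(pgd, address);
 *	pmd_t *pmd = pmd_offset(pud, address);
 *
 *	if (!pmd_none(*pmd) && !pmd_large(*pmd))
 *		pte = pte_offset_kernel(pmd, address);
 */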
/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address);
/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
#ifdef CONFIG_X86_PAE
 extern int set_kernel_exec(unsigned long vaddr, int enable);
#else
 static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
#endif
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
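/*
 * Usage sketch (illustrative only): with CONFIG_HIGHPTE the pte page may
 * live in highmem, so every pte_offset_map() must be paired with a
 * pte_unmap() once the pte pointer is no longer needed:
 *
 *	pte_t *pte = pte_offset_map(pmd, address);
 *	pte_t entry = *pte;
 *	pte_unmap(pte);
 */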
/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)
#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#endif /* CONFIG_FLATMEM */

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#include <asm-generic/pgtable.h>

#endif /* _I386_PGTABLE_H */