#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
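/*
 * Illustrative note (not part of the original header): bit 7 is shared
 * between _PAGE_BIT_PSE and _PAGE_BIT_PAT because a 4KB PTE has no PSE
 * bit, so the PAT selector can reuse that position; large pages instead
 * carry PAT in bit 12 (_PAGE_BIT_PAT_LARGE). As a worked example, a PTE
 * with PRESENT+RW+USER+ACCESSED+DIRTY sets bits 0, 1, 2, 5 and 6:
 *
 *	0x1 + 0x2 + 0x4 + 0x20 + 0x40 = 0x67
 *
 * a value that reappears below as _PAGE_TABLE and PDE_IDENT_ATTR.
 */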
#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping,
					 * saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
					   pte_present gives true */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
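/*
 * Sketch of how these combinations behave, assuming the kernel's usual
 * PAT MSR programming (illustrative, not part of the original header):
 * PWT and PCD select a PAT entry, so with the PAT set up as WB, WC,
 * UC-, UC the mapping is:
 *
 *	PCD=0 PWT=0  ->  WB   (write-back, the default)
 *	PCD=0 PWT=1  ->  WC   (write-combining; needs PAT reprogramming)
 *	PCD=1 PWT=0  ->  UC-  (uncached, but an MTRR may upgrade to WC)
 *	PCD=1 PWT=1  ->  UC   (strongly uncached)
 */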
#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
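/*
 * Illustrative note (not part of the original header): the three digits
 * encode execute/write/read in that order; __P* covers private
 * (MAP_PRIVATE) and __S* shared (MAP_SHARED) mappings. For example, a
 * PROT_READ | PROT_WRITE private mapping selects __P011 == PAGE_COPY:
 * the page is installed read-only so the first write faults and takes
 * the copy-on-write path, whereas the shared equivalent
 * __S011 == PAGE_SHARED really is writable.
 */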
/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
/*
 * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
 * bits are combined, this will allow the user to access the high address
 * mapped VDSO in the presence of CONFIG_COMPAT_VDSO
 */
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif
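/*
 * Cross-check (illustrative, not part of the original header):
 * 0x003 = PRESENT(0x1) + RW(0x2), and 0x067 adds USER(0x4),
 * ACCESSED(0x20) and DIRTY(0x40), i.e. the same bit pattern as
 * _PAGE_TABLE above.
 */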
/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))

#ifndef __ASSEMBLY__

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
static inline u64 pte_pa(pte_t pte)
{
	return pte_val(pte) & PTE_PFN_MASK;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}
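/*
 * Illustrative note (not part of the original header): pmd_large() tests
 * _PAGE_PSE and _PAGE_PRESENT together because the PSE bit position is
 * reused as _PAGE_PROTNONE when the present bit is clear; checking PSE
 * alone would misreport a PROT_NONE mapping as a large page.
 */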
static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
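/*
 * Usage sketch (illustrative, not part of the original header): callers
 * typically chain these helpers, e.g. a fault path marking a PTE both
 * dirty and writable before installing it:
 *
 *	pte_t entry = pte_mkwrite(pte_mkdirty(old_pte));
 *
 * Each helper returns a new pte_t value; nothing touches the page tables
 * until something like set_pte_at() installs the result.
 */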
extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}
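/*
 * Usage sketch (illustrative, not part of the original header): build a
 * present kernel mapping for physical frame 'pfn':
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 * The mask with __supported_pte_mask strips flags the CPU cannot use,
 * e.g. _PAGE_NX on hardware without the NX feature.
 */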
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}
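/*
 * Usage sketch (illustrative, not part of the original header): this is
 * the mprotect() helper; mm code changes protections while preserving
 * the pfn and the sticky bits in _PAGE_CHG_MASK:
 *
 *	ptent = pte_modify(ptent, newprot);
 */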
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
/* Indicate that x86 has its own track and untrack pfn vma functions */
#define track_pfn_vma_new track_pfn_vma_new
#define track_pfn_vma_copy track_pfn_vma_copy
#define untrack_pfn_vma untrack_pfn_vma

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */
#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
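/*
 * Worked example (illustrative, not part of the original header): on
 * 32-bit non-PAE, PGDIR_SHIFT is 22 and PTRS_PER_PGD is 1024, so with
 * PAGE_OFFSET = 0xC0000000:
 *
 *	KERNEL_PGD_BOUNDARY = 0xC0000000 >> 22 = 768
 *	KERNEL_PGD_PTRS     = 1024 - 768       = 256
 *
 * i.e. the last quarter of each pgd page maps the kernel.
 */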
#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}
#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to ensure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else
		pte = ptep_get_and_clear(mm, addr, ptep);
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}
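/*
 * Usage sketch (illustrative, not part of the original header): fork()
 * write-protects the parent's PTEs this way so that later writes by
 * either process fault and take the copy-on-write path:
 *
 *	ptep_set_wrprotect(src_mm, addr, src_pte);
 *
 * clear_bit() is atomic, so the RW bit can be cleared without racing
 * against the hardware setting Accessed/Dirty concurrently.
 */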
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
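/*
 * Usage sketch (illustrative, not part of the original header): pgd
 * allocation copies the kernel portion of swapper_pg_dir into each new
 * pgd page, using the boundary macros defined above:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */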
#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */