#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>
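
/*
 * The populate helpers wire one page-table level to the next: each writes
 * the physical address of the lower-level table, together with the
 * _PAGE_TABLE permission bits, into the given entry.
 */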
#define pmd_populate_kernel(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pud_populate(mm, pud, pmd) \
		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
#define pgd_populate(mm, pgd, pud) \
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
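
/*
 * pmd_populate() gets the new PTE page as a struct page, so the physical
 * address is reconstructed from its page frame number.
 */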
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
}
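
/*
 * pmd/pud pages are handed out zero-filled; the free helpers BUG on a
 * pointer that is not page aligned before returning the page.
 */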
static inline void pmd_free(pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	free_page((unsigned long)pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}
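
/*
 * Every pgd is kept on the global pgd_list, protected by pgd_lock, so that
 * changes to the kernel half of the page tables can be propagated to all
 * of them (e.g. by vmalloc_sync_all()).
 */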
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	list_add(&page->lru, &pgd_list);
	spin_unlock(&pgd_lock);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	list_del(&page->lru);
	spin_unlock(&pgd_lock);
}
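
/* pgd_alloc()/pgd_free() keep pgd_list current via the helpers above. */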
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

	if (!pgd)
		return NULL;
	pgd_list_add(pgd);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	pgd_list_del(pgd);
	free_page((unsigned long)pgd);
}
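
/*
 * Kernel PTE pages are handed out as kernel-virtual pointers, user PTE
 * pages as struct page, matching how pmd_populate_kernel()/pmd_populate()
 * consume them.
 */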
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);

	if (!p)
		return NULL;
	return virt_to_page(p);
}

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */
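/*
 * One possible shape of that idea (not implemented here): a reference
 * count in struct page would track how many entries of a page-table page
 * are in use, and the page would only go back to the allocator once the
 * count drops to zero, so empty page-table pages get reclaimed lazily.
 */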

static inline void pte_free_kernel(pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	free_page((unsigned long)pte);
}

static inline void pte_free(struct page *pte)
{
	__free_page(pte);
}
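
/*
 * The __*_free_tlb() hooks route page-table pages through the mmu_gather
 * batching of tlb_remove_page(), so the pages are only freed after the
 * relevant TLB entries have been flushed.
 */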
#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))

#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))

#endif /* _X86_64_PGALLOC_H */