1 #ifndef __ASM_SH_PGALLOC_H
2 #define __ASM_SH_PGALLOC_H
4 #include <asm/processor.h>
5 #include <linux/threads.h>
7 #define pgd_quicklist (current_cpu_data.pgd_quick)
8 #define pmd_quicklist ((unsigned long *)0)
9 #define pte_quicklist (current_cpu_data.pte_quick)
10 #define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
12 #include <asm/pgalloc-2level.h>
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
20 extern __inline__ pgd_t
*get_pgd_slow(void)
22 pgd_t
*ret
= (pgd_t
*)__get_free_page(GFP_KERNEL
);
25 memset(ret
, 0, USER_PTRS_PER_PGD
* sizeof(pgd_t
));
26 memcpy(ret
+ USER_PTRS_PER_PGD
, swapper_pg_dir
+ USER_PTRS_PER_PGD
, (PTRS_PER_PGD
- USER_PTRS_PER_PGD
) * sizeof(pgd_t
));
31 extern __inline__ pgd_t
*get_pgd_fast(void)
35 if ((ret
= pgd_quicklist
) != NULL
) {
36 pgd_quicklist
= (unsigned long *)(*ret
);
40 ret
= (unsigned long *)get_pgd_slow();
44 extern __inline__
void free_pgd_fast(pgd_t
*pgd
)
46 *(unsigned long *)pgd
= (unsigned long) pgd_quicklist
;
47 pgd_quicklist
= (unsigned long *) pgd
;
51 extern __inline__
void free_pgd_slow(pgd_t
*pgd
)
53 free_page((unsigned long)pgd
);
56 extern pte_t
*get_pte_slow(pmd_t
*pmd
, unsigned long address_preadjusted
);
57 extern pte_t
*get_pte_kernel_slow(pmd_t
*pmd
, unsigned long address_preadjusted
);
59 extern __inline__ pte_t
*get_pte_fast(void)
63 if((ret
= (unsigned long *)pte_quicklist
) != NULL
) {
64 pte_quicklist
= (unsigned long *)(*ret
);
71 extern __inline__
void free_pte_fast(pte_t
*pte
)
73 *(unsigned long *)pte
= (unsigned long) pte_quicklist
;
74 pte_quicklist
= (unsigned long *) pte
;
78 extern __inline__
void free_pte_slow(pte_t
*pte
)
80 free_page((unsigned long)pte
);
/*
 * Entry points used by the generic mm code.  pte frees always go
 * through the slow path here; pgd allocation uses the quicklist.
 */
#define pte_free_kernel(pte)	free_pte_slow(pte)
#define pte_free(pte)		free_pte_slow(pte)
#define pgd_free(pgd)		free_pgd_slow(pgd)
#define pgd_alloc()		get_pgd_fast()
88 extern inline pte_t
* pte_alloc_kernel(pmd_t
* pmd
, unsigned long address
)
92 address
= (address
>> PAGE_SHIFT
) & (PTRS_PER_PTE
- 1);
94 pte_t
* page
= (pte_t
*) get_pte_fast();
97 return get_pte_kernel_slow(pmd
, address
);
98 set_pmd(pmd
, __pmd(_KERNPG_TABLE
+ __pa(page
)));
99 return page
+ address
;
102 __handle_bad_pmd_kernel(pmd
);
105 return (pte_t
*) pmd_page(*pmd
) + address
;
108 extern inline pte_t
* pte_alloc(pmd_t
* pmd
, unsigned long address
)
110 address
= (address
>> PAGE_SHIFT
) & (PTRS_PER_PTE
- 1);
116 return (pte_t
*)pmd_page(*pmd
) + address
;
119 unsigned long page
= (unsigned long) get_pte_fast();
122 return get_pte_slow(pmd
, address
);
123 set_pmd(pmd
, __pmd(_PAGE_TABLE
+ __pa(page
)));
124 return (pte_t
*)page
+ address
;
127 __handle_bad_pmd(pmd
);
132 * allocating and freeing a pmd is trivial: the 1-entry pmd is
133 * inside the pgd, so has no extra memory associated with it.
135 extern inline void pmd_free(pmd_t
* pmd
)
/* Kernel variants are identical to the user ones on this port. */
#define pmd_free_kernel		pmd_free
#define pmd_alloc_kernel	pmd_alloc

/* Trim the pgd/pte quicklists; defined elsewhere. */
extern int do_check_pgt_cache(int, int);
144 extern inline void set_pgdir(unsigned long address
, pgd_t entry
)
146 struct task_struct
* p
;
149 read_lock(&tasklist_lock
);
153 *pgd_offset(p
->mm
,address
) = entry
;
155 read_unlock(&tasklist_lock
);
156 for (pgd
= (pgd_t
*)pgd_quicklist
; pgd
; pgd
= (pgd_t
*)*(unsigned long *)pgd
)
157 pgd
[address
>> PGDIR_SHIFT
] = entry
;
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 */
extern void flush_tlb(void);
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
			    unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
177 extern inline void flush_tlb_pgtables(struct mm_struct
*mm
,
178 unsigned long start
, unsigned long end
)
182 #endif /* __ASM_SH_PGALLOC_H */