/*
 * include/asm-sparc64/tlb.h
 * (extracted from a kernel git web view; blob 66138d959df5e25be7ceaed865f3873f8ae51a18)
 */
1 #ifndef _SPARC64_TLB_H
2 #define _SPARC64_TLB_H
4 #include <linux/config.h>
5 #include <linux/swap.h>
6 #include <asm/pgalloc.h>
7 #include <asm/tlbflush.h>
8 #include <asm/mmu_context.h>
10 #define TLB_BATCH_NR 192
/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
16 #ifdef CONFIG_SMP
17 #define FREE_PTE_NR 506
18 #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
19 #else
20 #define FREE_PTE_NR 1
21 #define tlb_fast_mode(bp) 1
22 #endif
24 struct mmu_gather {
25 struct mm_struct *mm;
26 unsigned int pages_nr;
27 unsigned int need_flush;
28 unsigned int fullmm;
29 unsigned int tlb_nr;
30 unsigned long vaddrs[TLB_BATCH_NR];
31 struct page *pages[FREE_PTE_NR];
34 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
36 #ifdef CONFIG_SMP
37 extern void smp_flush_tlb_pending(struct mm_struct *,
38 unsigned long, unsigned long *);
39 #endif
41 extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
42 extern void flush_tlb_pending(void);
44 static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
46 struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
48 BUG_ON(mp->tlb_nr);
50 mp->mm = mm;
51 mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
52 mp->fullmm = full_mm_flush;
54 return mp;
58 static inline void tlb_flush_mmu(struct mmu_gather *mp)
60 if (mp->need_flush) {
61 mp->need_flush = 0;
62 if (!tlb_fast_mode(mp)) {
63 free_pages_and_swap_cache(mp->pages, mp->pages_nr);
64 mp->pages_nr = 0;
70 #ifdef CONFIG_SMP
71 extern void smp_flush_tlb_mm(struct mm_struct *mm);
72 #define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
73 #else
74 #define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
75 #endif
77 static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
79 tlb_flush_mmu(mp);
81 if (mp->fullmm) {
82 if (CTX_VALID(mp->mm->context))
83 do_flush_tlb_mm(mp->mm);
84 mp->fullmm = 0;
85 } else
86 flush_tlb_pending();
88 /* keep the page table cache within bounds */
89 check_pgt_cache();
91 put_cpu_var(mmu_gathers);
94 static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
96 mp->need_flush = 1;
97 if (tlb_fast_mode(mp)) {
98 free_page_and_swap_cache(page);
99 return;
101 mp->pages[mp->pages_nr++] = page;
102 if (mp->pages_nr >= FREE_PTE_NR)
103 tlb_flush_mmu(mp);
106 #define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
107 #define pte_free_tlb(mp,ptepage) pte_free(ptepage)
108 #define pmd_free_tlb(mp,pmdp) pmd_free(pmdp)
109 #define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
111 #define tlb_migrate_finish(mm) do { } while (0)
112 #define tlb_start_vma(tlb, vma) do { } while (0)
113 #define tlb_end_vma(tlb, vma) do { } while (0)
115 #endif /* _SPARC64_TLB_H */