x86: compat_binfmt_elf
[linux-2.6/libata-dev.git] / include / asm-alpha / tlbflush.h
blobeefab3fb51ae87f76050c34118fabe6d31c87078
1 #ifndef _ALPHA_TLBFLUSH_H
2 #define _ALPHA_TLBFLUSH_H
4 #include <linux/mm.h>
5 #include <asm/compiler.h>
7 #ifndef __EXTERN_INLINE
8 #define __EXTERN_INLINE extern inline
9 #define __MMU_EXTERN_INLINE
10 #endif
12 extern void __load_new_mm_context(struct mm_struct *);
/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */
18 __EXTERN_INLINE void
19 ev4_flush_tlb_current(struct mm_struct *mm)
21 __load_new_mm_context(mm);
22 tbiap();
25 __EXTERN_INLINE void
26 ev5_flush_tlb_current(struct mm_struct *mm)
28 __load_new_mm_context(mm);
/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here, there is no way to invalidate a
   specific icache page.  */
35 __EXTERN_INLINE void
36 ev4_flush_tlb_current_page(struct mm_struct * mm,
37 struct vm_area_struct *vma,
38 unsigned long addr)
40 int tbi_flag = 2;
41 if (vma->vm_flags & VM_EXEC) {
42 __load_new_mm_context(mm);
43 tbi_flag = 3;
45 tbi(tbi_flag, addr);
48 __EXTERN_INLINE void
49 ev5_flush_tlb_current_page(struct mm_struct * mm,
50 struct vm_area_struct *vma,
51 unsigned long addr)
53 if (vma->vm_flags & VM_EXEC)
54 __load_new_mm_context(mm);
55 else
56 tbi(2, addr);
/* Select the CPU-specific implementations.  With a generic kernel the
   choice is made at runtime through the machine vector; otherwise it
   is fixed at build time by the CPU config option.  */
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif

/* Undo the __EXTERN_INLINE setup done at the top of this header, but
   only if we were the ones who defined it.  */
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif
78 /* Flush current user mapping. */
79 static inline void
80 flush_tlb(void)
82 flush_tlb_current(current->active_mm);
85 /* Flush someone else's user mapping. */
86 static inline void
87 flush_tlb_other(struct mm_struct *mm)
89 unsigned long *mmc = &mm->context[smp_processor_id()];
90 /* Check it's not zero first to avoid cacheline ping pong
91 when possible. */
92 if (*mmc) *mmc = 0;
95 #ifndef CONFIG_SMP
96 /* Flush everything (kernel mapping may also have changed
97 due to vmalloc/vfree). */
98 static inline void flush_tlb_all(void)
100 tbia();
103 /* Flush a specified user mapping. */
104 static inline void
105 flush_tlb_mm(struct mm_struct *mm)
107 if (mm == current->active_mm)
108 flush_tlb_current(mm);
109 else
110 flush_tlb_other(mm);
113 /* Page-granular tlb flush. */
114 static inline void
115 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
117 struct mm_struct *mm = vma->vm_mm;
119 if (mm == current->active_mm)
120 flush_tlb_current_page(mm, vma, addr);
121 else
122 flush_tlb_other(mm);
125 /* Flush a specified range of user mapping. On the Alpha we flush
126 the whole user tlb. */
127 static inline void
128 flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
129 unsigned long end)
131 flush_tlb_mm(vma->vm_mm);
134 #else /* CONFIG_SMP */
136 extern void flush_tlb_all(void);
137 extern void flush_tlb_mm(struct mm_struct *);
138 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
139 extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
140 unsigned long);
142 #endif /* CONFIG_SMP */
144 #define flush_tlb_kernel_range(start, end) flush_tlb_all()
146 #endif /* _ALPHA_TLBFLUSH_H */