1 #ifndef _ALPHA_TLBFLUSH_H
2 #define _ALPHA_TLBFLUSH_H
/* NOTE(review): this chunk appears truncated -- the original file's line
   numbers are fused into the text and several lines (other #includes, the
   closing #endif of the guard below) are not visible here.  Confirm
   against the full header before editing. */
5 #include <asm/compiler.h>
/* __EXTERN_INLINE pattern: by default the helper bodies below are
   "extern inline"; a .c file can pre-define __EXTERN_INLINE before
   including this header to get one out-of-line copy emitted.
   __MMU_EXTERN_INLINE records that this header supplied the default,
   so it can be retracted again further down. */
7 #ifndef __EXTERN_INLINE
8 #define __EXTERN_INLINE extern inline
9 #define __MMU_EXTERN_INLINE
/* Loads a new mm context on the current CPU; defined elsewhere
   (presumably arch mm code -- not visible in this chunk). */
12 extern void __load_new_mm_context(struct mm_struct
*);
15 /* Use a few helper functions to hide the ugly broken ASN
16 numbers on early Alphas (ev4 and ev45). */
/* EV4 variant: flush the current mm's TLB entries by loading a fresh
   mm context (works around the broken ASN handling noted above).
   NOTE(review): the return-type line, braces, and any trailing TLB
   invalidate call are missing from this view -- truncated source. */
19 ev4_flush_tlb_current(struct mm_struct
*mm
)
21 __load_new_mm_context(mm
);
/* EV5 variant: flush the current mm's TLB entries by loading a fresh
   mm context.  Visibly identical to the EV4 path in this view; any
   difference (e.g. which invalidate instruction follows) is in lines
   not shown here -- truncated source, confirm against the full file. */
26 ev5_flush_tlb_current(struct mm_struct
*mm
)
28 __load_new_mm_context(mm
);
31 /* Flush just one page in the current TLB set. We need to be very
32 careful about the icache here, there is no way to invalidate a
33 specific icache page. */
/* EV4 variant of the single-page flush.  For executable mappings
   (VM_EXEC) there is no way to invalidate one icache page (see the
   comment above), so the whole mm context is reloaded instead.
   NOTE(review): the third parameter (presumably the page address), the
   return-type line, the else-branch and closing braces are missing from
   this view -- truncated source. */
36 ev4_flush_tlb_current_page(struct mm_struct
* mm
,
37 struct vm_area_struct
*vma
,
41 if (vma
->vm_flags
& VM_EXEC
) {
42 __load_new_mm_context(mm
);
/* EV5 variant of the single-page flush.  Same VM_EXEC special case as
   the EV4 path: executable pages force a full context reload because a
   single icache page cannot be invalidated.
   NOTE(review): the third parameter, return-type line, the non-exec
   branch and closing brace are missing from this view -- truncated
   source. */
49 ev5_flush_tlb_current_page(struct mm_struct
* mm
,
50 struct vm_area_struct
*vma
,
53 if (vma
->vm_flags
& VM_EXEC
)
54 __load_new_mm_context(mm
);
/* Bind the flush implementation: generic kernels dispatch at run time
   through the machine vector (alpha_mv), while CPU-specific kernels
   bind the ev4/ev5 helpers directly at compile time.
   NOTE(review): the #else and #endif lines of this conditional block
   are missing from this view -- truncated source.  The ev5 defines
   below presumably sit in the non-EV4 branch. */
60 #ifdef CONFIG_ALPHA_GENERIC
61 # define flush_tlb_current alpha_mv.mv_flush_tlb_current
62 # define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
64 # ifdef CONFIG_ALPHA_EV4
65 # define flush_tlb_current ev4_flush_tlb_current
66 # define flush_tlb_current_page ev4_flush_tlb_current_page
68 # define flush_tlb_current ev5_flush_tlb_current
69 # define flush_tlb_current_page ev5_flush_tlb_current_page
/* If this header supplied the __EXTERN_INLINE default near the top,
   retract it so later headers see a clean slate.
   NOTE(review): the closing #endif is not visible here -- truncated
   source. */
73 #ifdef __MMU_EXTERN_INLINE
74 #undef __EXTERN_INLINE
75 #undef __MMU_EXTERN_INLINE
78 /* Flush current user mapping. */
/* NOTE(review): the function signature (presumably a static inline
   flush_tlb(void)) and its braces are missing from this view --
   truncated source.  The surviving body flushes the mm that is
   currently active on this CPU. */
82 flush_tlb_current(current
->active_mm
);
85 /* Flush someone else's user mapping. */
/* NOTE(review): return-type line, braces and the rest of the body are
   missing from this view -- truncated source.  What survives shows
   mm->context is indexed by smp_processor_id(), i.e. a per-CPU context
   slot; the trailing comment fragment suggests the slot is tested for
   zero before being written, to avoid cacheline ping-pong. */
87 flush_tlb_other(struct mm_struct
*mm
)
89 unsigned long *mmc
= &mm
->context
[smp_processor_id()];
90 /* Check it's not zero first to avoid cacheline ping pong
95 /* Flush a specified range of user mapping page tables from TLB.
96 Although Alpha uses VPTE caches, this can be a nop, as Alpha does
97 not have finegrained tlb flushing, so it will flush VPTE stuff
98 during next flush_tlb_range. */
/* NOTE(review): the signature is truncated after `start` -- the end
   parameter, return-type line and the (per the comment above,
   presumably empty) body are not visible here. */
101 flush_tlb_pgtables(struct mm_struct
*mm
, unsigned long start
,
107 /* Flush everything (kernel mapping may also have changed
108 due to vmalloc/vfree). */
/* NOTE(review): the body is missing from this view -- truncated
   source.  Presumably a full TLB invalidate (e.g. tbia) -- confirm
   against the full file. */
109 static inline void flush_tlb_all(void)
114 /* Flush a specified user mapping. */
/* NOTE(review): return-type line and braces are missing from this
   view -- truncated source.  What survives: only the mm currently
   active on this CPU gets an immediate flush; the path for other mms
   (if any) is not visible here. */
116 flush_tlb_mm(struct mm_struct
*mm
)
118 if (mm
== current
->active_mm
)
119 flush_tlb_current(mm
);
124 /* Page-granular tlb flush. */
/* NOTE(review): return-type line, braces and the non-active-mm path
   are missing from this view -- truncated source.  What survives: the
   vma's owning mm is fetched, and if it is the mm active on this CPU
   the per-page flush helper is invoked with (mm, vma, addr). */
126 flush_tlb_page(struct vm_area_struct
*vma
, unsigned long addr
)
128 struct mm_struct
*mm
= vma
->vm_mm
;
130 if (mm
== current
->active_mm
)
131 flush_tlb_current_page(mm
, vma
, addr
);
136 /* Flush a specified range of user mapping. On the Alpha we flush
137 the whole user tlb. */
/* NOTE(review): the end parameter, return-type line and braces are
   missing from this view -- truncated source.  Range granularity is
   deliberately ignored: the whole mm is flushed via flush_tlb_mm, as
   the comment above states. */
139 flush_tlb_range(struct vm_area_struct
*vma
, unsigned long start
,
142 flush_tlb_mm(vma
->vm_mm
);
145 #else /* CONFIG_SMP */
/* SMP builds replace the inline UP versions above with real
   cross-CPU implementations defined out of line.
   NOTE(review): the final parameter of flush_tlb_range's declaration
   is missing from this view -- truncated source. */
147 extern void flush_tlb_all(void);
148 extern void flush_tlb_mm(struct mm_struct
*);
149 extern void flush_tlb_page(struct vm_area_struct
*, unsigned long);
150 extern void flush_tlb_range(struct vm_area_struct
*, unsigned long,
153 #endif /* CONFIG_SMP */
/* No fine-grained kernel-range flush on Alpha: flush everything. */
155 #define flush_tlb_kernel_range(start, end) flush_tlb_all()
157 #endif /* _ALPHA_TLBFLUSH_H */