1 #ifndef _S390_TLBFLUSH_H
2 #define _S390_TLBFLUSH_H
5 #include <asm/processor.h>
6 #include <asm/pgalloc.h>
 * - flush_tlb() flushes the current mm struct TLBs
 * - flush_tlb_all() flushes all processes' TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 * S/390 has three ways of flushing TLBs:
 * 'ptlb' does a flush of the local processor
 * 'csp' flushes the TLBs on all PUs of an SMP system
 * 'ipte' invalidates a pte in a page table and flushes that out of
 * the TLBs of all PUs of an SMP system
/* Flush the TLB entries of the local CPU only ('ptlb' instruction). */
#define local_flush_tlb() \
do { asm volatile("ptlb": : :"memory"); } while (0)
 * We always need to flush, since s390 does not flush the TLB
 * on each context switch
/*
 * Uniprocessor variants of the flush primitives.
 * NOTE(review): the function bodies are not visible in this chunk of the
 * file; per the comment above, each presumably reduces to a local TLB
 * flush — confirm against the full source before changing anything here.
 */
static inline void flush_tlb(void)

static inline void flush_tlb_all(void)

static inline void flush_tlb_mm(struct mm_struct *mm)

/* NOTE(review): second parameter and body truncated in this view */
static inline void flush_tlb_page(struct vm_area_struct *vma,

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)

/* NOTE(review): macro continuation line truncated in this view */
#define flush_tlb_kernel_range(start, end) \
68 extern void smp_ptlb_all(void);
/*
 * Flush the TLBs on all CPUs of the system using the 'csp' instruction.
 * NOTE(review): the opening brace, the 'dummy' declaration, the body of
 * the !MACHINE_HAS_CSP fallback and the asm mnemonic line are truncated
 * in this view — compare against the full source before changing.
 */
static inline void global_flush_tlb(void)
	/* csp wants its operands in fixed registers 2, 3 and 4 */
	register unsigned long reg2 asm("2");
	register unsigned long reg3 asm("3");
	register unsigned long reg4 asm("4");

	/* machines without csp take a fallback path (truncated here) */
	if (!MACHINE_HAS_CSP) {
#endif /* __s390x__ */

	/*
	 * NOTE(review): the +1 presumably sets a low-order flag bit in the
	 * csp operand address to request the TLB flush — confirm.
	 */
	reg4 = ((unsigned long) &dummy) + 1;
	: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
 * We only have to do a global flush of the TLB if the process has run on
 * any PU other than the current one since the last flush.
 * If we have threads (mm->count > 1) we always do a global flush,
 * since the process runs on more than one processor at the same time.
/*
 * Flush the TLB entries belonging to one mm context.
 * NOTE(review): several lines (braces, early return, the asm volatile
 * openers and the local-vs-global dispatch tail) are truncated in this
 * view — compare against the full source before changing.
 */
static inline void __flush_tlb_mm(struct mm_struct * mm)
	cpumask_t local_cpumask;

	/* nothing to flush if this mm never ran on any CPU */
	if (unlikely(cpus_empty(mm->cpu_vm_mask)))

	if (MACHINE_HAS_IDTE) {
		/*
		 * .insn rrf,0xb98e0000 encodes an instruction keyed on the
		 * physical pgd address; it is issued for the shadow pgd (if
		 * any) and for mm->pgd itself.
		 */
		pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);

		" .insn rrf,0xb98e0000,0,%0,%1,0"
		"a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" );
		" .insn rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");

	/*
	 * Compare the mm's CPU mask against this CPU only; the two branch
	 * bodies (presumably local vs. global flush) are truncated here.
	 */
	local_cpumask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
/*
 * SMP variants: the mm-based primitives route through __flush_tlb_mm()
 * so the local-vs-global decision is made in one place.
 * NOTE(review): braces and some function bodies (flush_tlb_all,
 * flush_tlb_mm, part of flush_tlb_page's signature) are truncated in
 * this view — confirm against the full source.
 */
static inline void flush_tlb(void)
	__flush_tlb_mm(current->mm);

static inline void flush_tlb_all(void)

static inline void flush_tlb_mm(struct mm_struct *mm)

static inline void flush_tlb_page(struct vm_area_struct *vma,
	__flush_tlb_mm(vma->vm_mm);

/* start/end are ignored: the whole mm context is flushed at once */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
	__flush_tlb_mm(vma->vm_mm);
/* Kernel mappings are shared by all CPUs, so a kernel-range flush is global. */
#define flush_tlb_kernel_range(start, end) global_flush_tlb()
/*
 * Intentionally a no-op: see body comment.
 * NOTE(review): the surrounding braces are truncated in this view.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
	/* S/390 does not keep any page table caches in TLB */
161 #endif /* _S390_TLBFLUSH_H */