[mikesnafu-overlay.git] / include / asm-s390 / tlbflush.h
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * Flush all tlb entries on the local cpu.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}
/*
 * Flush all tlb entries on all cpus.
 */
static inline void __tlb_flush_global(void)
{
	extern void smp_ptlb_all(void);
	register unsigned long reg2 asm("2");
	register unsigned long reg3 asm("3");
	register unsigned long reg4 asm("4");
	long dummy;

#ifndef __s390x__
	if (!MACHINE_HAS_CSP) {
		smp_ptlb_all();
		return;
	}
#endif /* __s390x__ */

	/*
	 * csp (compare and swap and purge): with reg2 == reg3 == dummy == 0
	 * the compare always succeeds and the store is a no-op, so the
	 * instruction is used purely for its purge side effect.  The +1
	 * sets the low bit of the operand address, which requests the TLB
	 * purge and broadcasts it to all cpus.
	 */
	dummy = 0;
	reg2 = reg3 = 0;
	reg4 = ((unsigned long) &dummy) + 1;
	asm volatile(
		"	csp	%0,%2"
		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
}
/*
 * Flush all tlb entries of a page table on all cpus.
 * The .insn below emits idte (invalidate dat table entry, opcode 0xb98e),
 * which removes the TLB entries formed from the given asce on all cpus.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc" );
}
static inline void __tlb_flush_mm(struct mm_struct * mm)
{
	cpumask_t local_cpumask;

	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
		return;
	/*
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu.
	 */
	if (MACHINE_HAS_IDTE) {
		if (mm->context.noexec)
			__tlb_flush_idte((unsigned long)
					 get_shadow_table(mm->pgd) |
					 mm->context.asce_bits);
		__tlb_flush_idte((unsigned long) mm->pgd |
				 mm->context.asce_bits);
		return;
	}
	preempt_disable();
	/*
	 * If the process only ran on the local cpu, do a local flush.
	 */
	local_cpumask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
		__tlb_flush_local();
	else
		__tlb_flush_global();
	preempt_enable();
}
/*
 * The ptep_* primitives skip the flush when the mm has a single user
 * (see the comment below), so flush here exactly in that case.
 */
static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
{
	if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
		__tlb_flush_mm(mm);
}
/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 *  (an illustrative kernel-range caller is sketched after
 *  flush_tlb_kernel_range() below)
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 * (an illustrative batching sketch follows flush_tlb_mm() below)
 */
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_cond(mm);
}
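
/*
 * Illustrative sketch only, not part of the original header: a caller in
 * the spirit of the copy_page_range pattern described above, which
 * write-protects a run of ptes (no flush per pte for a single-user mm)
 * and then issues one flush_tlb_mm() at the end.  The helper name and
 * the assumption of a contiguous pte array are made up for the example.
 */
static inline void example_wrprotect_batch(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++)
		ptep_set_wrprotect(mm, addr + i * PAGE_SIZE, ptep + i);

	/* one deferred flush covers every pte changed above */
	flush_tlb_mm(mm);
}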
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_cond(vma->vm_mm);
}
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_mm(&init_mm);
}
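
/*
 * Illustrative sketch only, not part of the original header: a caller
 * that has just torn down kernel page table entries for [start, end)
 * (a vmalloc-style unmap, say) issues one ranged flush afterwards.  On
 * s390 this falls back to a full flush of init_mm, as seen in
 * flush_tlb_kernel_range() above.  The helper name is made up.
 */
static inline void example_kernel_unmap_flush(unsigned long start,
					      unsigned long end)
{
	/* ... kernel ptes for [start, end) were cleared by the caller ... */
	flush_tlb_kernel_range(start, end);
}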
#endif /* _S390_TLBFLUSH_H */