/*
 * include/asm-s390/tlbflush.h
 *
 * Extracted from gitweb blob fa4dc916a9bfe5de40fd2bb5f1bc31dd8015dadb
 * (tree: linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git; page header carried the
 * unrelated commit subject "[NETFILTER]: sip conntrack: do case insensitive
 * SIP header search").
 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

/*
 * S/390 has three ways of flushing TLBs
 * 'ptlb' does a flush of the local processor
 * 'csp' flushes the TLBs on all PUs of a SMP
 * 'ipte' invalidates a pte in a page table and flushes that out of
 * the TLBs of all PUs of a SMP
 */
/*
 * Flush the TLB of the local CPU only ('ptlb' instruction).
 * do-while(0) keeps the macro safe as a single statement in if/else.
 */
#define local_flush_tlb() \
	do { asm volatile("ptlb": : :"memory"); } while (0)
#ifndef CONFIG_SMP

/*
 * We always need to flush, since s390 does not flush tlb
 * on each context switch
 */
/* UP: flush the current mm's TLB entries — a full local flush suffices. */
static inline void flush_tlb(void)
{
	local_flush_tlb();
}
/* UP: flush all TLB entries — only one CPU, so a local flush is enough. */
static inline void flush_tlb_all(void)
{
	local_flush_tlb();
}
/* UP: flush the TLB for the given mm context (mm itself is unused here). */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	local_flush_tlb();
}
/*
 * UP: flush one page mapping. s390 'ptlb' has no per-page form here,
 * so the whole local TLB is flushed regardless of addr.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	local_flush_tlb();
}
/*
 * UP: flush a range of pages. Implemented as a full local flush;
 * start/end are accepted for interface compatibility only.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	local_flush_tlb();
}
/*
 * UP: flush a range of kernel pages — full local flush.
 * Wrapped in do-while(0) so the macro expands to exactly one statement
 * (the bare trailing ';' form breaks inside un-braced if/else).
 */
#define flush_tlb_kernel_range(start, end) \
	do { local_flush_tlb(); } while (0)
#else

#include <asm/smp.h>

extern void smp_ptlb_all(void);
69 static inline void global_flush_tlb(void)
71 register unsigned long reg2 asm("2");
72 register unsigned long reg3 asm("3");
73 register unsigned long reg4 asm("4");
74 long dummy;
76 #ifndef __s390x__
77 if (!MACHINE_HAS_CSP) {
78 smp_ptlb_all();
79 return;
81 #endif /* __s390x__ */
83 dummy = 0;
84 reg2 = reg3 = 0;
85 reg4 = ((unsigned long) &dummy) + 1;
86 asm volatile(
87 " csp %0,%2"
88 : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
/*
 * We only have to do global flush of tlb if process run since last
 * flush on any other pu than current.
 * If we have threads (mm->count > 1) we always do a global flush,
 * since the process runs on more than one processor at the same time.
 */
98 static inline void __flush_tlb_mm(struct mm_struct * mm)
100 cpumask_t local_cpumask;
102 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
103 return;
104 if (MACHINE_HAS_IDTE) {
105 asm volatile(
106 " .insn rrf,0xb98e0000,0,%0,%1,0"
107 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
108 return;
110 preempt_disable();
111 local_cpumask = cpumask_of_cpu(smp_processor_id());
112 if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
113 local_flush_tlb();
114 else
115 global_flush_tlb();
116 preempt_enable();
119 static inline void flush_tlb(void)
121 __flush_tlb_mm(current->mm);
/* SMP: flush all TLB entries on all CPUs. */
static inline void flush_tlb_all(void)
{
	global_flush_tlb();
}
/* SMP: flush the TLB entries of the given mm context. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
}
131 static inline void flush_tlb_page(struct vm_area_struct *vma,
132 unsigned long addr)
134 __flush_tlb_mm(vma->vm_mm);
136 static inline void flush_tlb_range(struct vm_area_struct *vma,
137 unsigned long start, unsigned long end)
139 __flush_tlb_mm(vma->vm_mm);
/* SMP: kernel mappings are shared by all CPUs — flush them everywhere. */
#define flush_tlb_kernel_range(start, end) global_flush_tlb()
#endif /* CONFIG_SMP */
/*
 * Flush cached page-table entries for a range of an mm.
 * Intentionally a no-op: S/390 does not keep any page table
 * caches in the TLB, so there is nothing to invalidate.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
	/* S/390 does not keep any page table caches in TLB */
}
#endif /* _S390_TLBFLUSH_H */