#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context numbers are
 * used to reduce or eliminate the need to perform TLB flushes due to context switches.
 * Context numbers are implemented using ia-64 region ids.  Since the IA-64 TLB does not
 * consider the region number when performing a TLB lookup, we need to assign a unique
 * region id to each region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))

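/*
 * Worked example (illustrative only): for context number 5 and an address
 * in region 2 (i.e. addr >> 61 == 2), ia64_rid(5, addr) yields
 * (5 << 3) | 2 == 42, so each of the eight 2^61-byte regions of a process
 * gets its own region id.
 */
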
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

#define MMU_CONTEXT_DEBUG	0

#if MMU_CONTEXT_DEBUG

#include <ia64intrin.h>

extern struct mmu_trace_entry {
	char op;
	u8 cpu;
	u32 context;
	void *mm;
} mmu_tbuf[1024];

extern volatile int mmu_tbuf_index;

# define MMU_TRACE(_op,_cpu,_mm,_ctx)						  \
do {										  \
	int i = __sync_fetch_and_add(&mmu_tbuf_index, 1) % ARRAY_SIZE(mmu_tbuf); \
	struct mmu_trace_entry e;						  \
	e.op = (_op);								  \
	e.cpu = (_cpu);								  \
	e.mm = (_mm);								  \
	e.context = (_ctx);							  \
	mmu_tbuf[i] = e;							  \
} while (0)

#else
# define MMU_TRACE(op,cpu,mm,ctx)	do { ; } while (0)
#endif

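/*
 * Trace op characters used in this file: 'N' init_new_context, 'D'
 * destroy_context, 'A'/'a' entry/exit of activate_context, 'd'
 * deactivate_mm.
 */
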
struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because an old
 * context number might have been reused.  This is signalled by the ia64_need_tlb_flush
 * per-CPU variable, which is checked in the routine below.  Called by activate_mm().
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);

	if (unlikely(__get_cpu_var(ia64_need_tlb_flush))) {
		local_flush_tlb_all();
		__get_cpu_var(ia64_need_tlb_flush) = 0;
	}
}

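/*
 * Note: the counterpart lives in wrap_mmu_context() (arch/ia64/mm/tlb.c),
 * which presumably sets ia64_need_tlb_flush on all other CPUs after
 * recycling context numbers and flushes the local TLB itself; this routine
 * then performs the delayed flush when each CPU next activates an mm.
 */
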
static inline mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	mm_context_t context = mm->context;

	if (context)
		return context;

	spin_lock(&ia64_ctx.lock);
	{
		/* re-check, now that we've got the lock: */
		context = mm->context;
		if (context == 0) {
			if (ia64_ctx.next >= ia64_ctx.limit)
				wrap_mmu_context(mm);
			mm->context = context = ia64_ctx.next++;
		}
	}
	spin_unlock(&ia64_ctx.lock);
	return context;
}

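/*
 * Design note (sketch): the unlocked read of mm->context above is the fast
 * path; once assigned, a context number stays set for the lifetime of the
 * mm, so most calls never take ia64_ctx.lock.  The re-check under the lock
 * covers the race where two CPUs enter here for the same mm at once.
 */
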
/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	MMU_TRACE('N', smp_processor_id(), mm, 0);
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	MMU_TRACE('D', smp_processor_id(), mm, mm->context);
}

static inline void
reload_context (mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4;

	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;

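	/*
	 * Example of the encoding above (assuming 16KB pages, PAGE_SHIFT == 14):
	 * bit 0 is the VHPT enable bit, bits 2-7 hold the preferred page size,
	 * and the region id starts at bit 8.  For context 1, rid == 1 << 3 == 8,
	 * so rr0 == (8 << 8) | (14 << 2) | 1 == 0x839, and the 1 << 8 increment
	 * gives rr1..rr4 region ids 9..12.
	 */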
#ifdef CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (HPAGE_SHIFT << 2);
#endif

	ia64_set_rr(0x0000000000000000, rr0);
	ia64_set_rr(0x2000000000000000, rr1);
	ia64_set_rr(0x4000000000000000, rr2);
	ia64_set_rr(0x6000000000000000, rr3);
	ia64_set_rr(0x8000000000000000, rr4);
	ia64_insn_group_barrier();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
	ia64_insn_group_barrier();
}

static inline void
activate_context (struct mm_struct *mm)
{
	mm_context_t context;

	do {
		context = get_mmu_context(mm);
		MMU_TRACE('A', smp_processor_id(), mm, context);
		reload_context(context);
		MMU_TRACE('a', smp_processor_id(), mm, context);
		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)						\
do {									\
	MMU_TRACE('d', smp_processor_id(), mm, mm->context);		\
} while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	delayed_tlb_flush();

	/*
	 * We may get interrupts here, but that's OK because interrupt handlers
	 * cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */