#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a region
 * id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
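
/*
 * For example: address bits 63-61 select one of eight regions, so for
 * context 5 and an address in region 1 (such as 0x2000000000000000),
 * ia64_rid() yields (5 << 3) | 1 == 41 -- the context number in the
 * upper bits of the rid, the region number in the low three bits.
 */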

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm-generic/mm_hooks.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused. This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below. Called by activate_mm(). <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
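		/*
		 * Re-check now that we hold ia64_ctx.lock: another path
		 * may have flushed and cleared the flag between the
		 * lockless test above and taking the lock, so each wrap
		 * triggers at most one flush on this CPU.
		 */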
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpus_clear(mm->cpu_vm_mask);
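		/*
		 * [ia64_ctx.next, ia64_ctx.limit) is the current run of
		 * free context numbers; once it is used up, scan the
		 * bitmap for the next free run, and wrap the counter
		 * (forcing TLB flushes) only when the whole space up to
		 * max_ctx has been consumed.
		 */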
		if (ia64_ctx.next >= ia64_ctx.limit) {
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do.  */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef  CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif

	ia64_set_rr(0x0000000000000000UL, rr0);
	ia64_set_rr(0x2000000000000000UL, rr1);
	ia64_set_rr(0x4000000000000000UL, rr2);
	ia64_set_rr(0x6000000000000000UL, rr3);
	ia64_set_rr(0x8000000000000000UL, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

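	/*
	 * Preemption must be off so that smp_processor_id() is stable
	 * and the region registers written by reload_context() are
	 * those of the CPU we are running on.
	 */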
	do {
		context = get_mmu_context(mm);
		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */