/*
 * include/asm-ppc/mmu_context.h
 */
#include <linux/config.h>

#ifdef __KERNEL__
#ifndef __PPC_MMU_CONTEXT_H
#define __PPC_MMU_CONTEXT_H
/* the way contexts are handled on the ppc they are vsid's and
   don't need any special treatment right now.
   perhaps I can defer flushing the tlb by keeping a list of
   zombie vsid/context's and handling that through destroy_context
   later -- Cort

   The MPC8xx has only 16 contexts.  We rotate through them on each
   task switch.  A better way would be to keep track of tasks that
   own contexts, and implement an LRU usage.  That way very active
   tasks don't always have to pay the TLB reload overhead.  The
   kernel pages are mapped shared, so the kernel can run on behalf
   of any task that makes a kernel entry.  Shared does not mean they
   are not protected, just that the ASID comparison is not performed.
	-- Dan
 */

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
	/* Nothing to do here on PPC. */
}
#ifdef CONFIG_8xx
#define NO_CONTEXT		16
#define LAST_CONTEXT		15
#define MUNGE_CONTEXT(n)	(n)

#else
/* PPC 6xx, 7xx CPUs */
#define NO_CONTEXT		0
#define LAST_CONTEXT		0xfffff

/*
 * Allocating context numbers this way tends to spread out
 * the entries in the hash table better than a simple linear
 * allocation.
 */
#define MUNGE_CONTEXT(n)	(((n) * 897) & LAST_CONTEXT)
#endif
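/*
 * Illustrative note, not part of the original header: on 6xx/7xx,
 * multiplying by 897 (an odd number) is a bijection modulo
 * LAST_CONTEXT + 1 (a power of two), so no two context numbers
 * collide, yet consecutive allocations land 897 apart rather than
 * adjacent in the hash table:
 *
 *	MUNGE_CONTEXT(1) == 897		(0x00381)
 *	MUNGE_CONTEXT(2) == 1794	(0x00702)
 *	MUNGE_CONTEXT(3) == 2691	(0x00A83)
 */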
extern atomic_t next_mmu_context;
extern void mmu_context_overflow(void);
/*
 * Set the current MMU context.
 * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
 * loading up the segment registers for the user part of the address space.
 *
 * On the 8xx parts, the context currently includes the page directory,
 * and once I implement a real TLB context manager this will disappear.
 * The PGD is ignored on other processors. - Dan
 */
extern void set_context(int context, void *pgd);
#ifdef CONFIG_8xx
extern inline void mmu_context_overflow(void)
{
	atomic_set(&next_mmu_context, -1);
}
#endif
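/*
 * Illustrative trace, not part of the original header: on the 8xx,
 * once next_mmu_context reaches LAST_CONTEXT (15), get_mmu_context()
 * below calls mmu_context_overflow(), which rewinds the counter to -1
 * so the next atomic_inc_return() hands out context 0 again -- the
 * simple rotation through 16 contexts described at the top of the file.
 */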
/*
 * Get a new mmu context for task tsk if necessary.
 */
#define get_mmu_context(mm)						\
do {									\
	if (mm->context == NO_CONTEXT) {				\
		if (atomic_read(&next_mmu_context) == LAST_CONTEXT)	\
			mmu_context_overflow();				\
		mm->context = MUNGE_CONTEXT(atomic_inc_return(&next_mmu_context));\
	}								\
} while (0)
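/*
 * Illustrative usage sketch, not part of the original header; the
 * function and its argument are hypothetical.  A fresh mm (as left by
 * init_new_context below) triggers allocation on the first call only:
 */
#if 0
void example(struct mm_struct *mm)	/* hypothetical */
{
	mm->context = NO_CONTEXT;	/* state after init_new_context() */
	get_mmu_context(mm);		/* allocates: next_mmu_context is
					   bumped and munged into mm->context */
	get_mmu_context(mm);		/* no-op: mm->context != NO_CONTEXT */
}
#endif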
/*
 * Set up the context for a new address space.
 */
#define init_new_context(tsk,mm)	(((mm)->context = NO_CONTEXT), 0)
/*
 * We're finished using the context for an address space.
 */
#define destroy_context(mm)		do { } while (0)
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk, int cpu)
{
	tsk->thread.pgdir = next->pgd;
	get_mmu_context(next);
	set_context(next->context, next->pgd);
}
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	current->thread.pgdir = mm->pgd;
	get_mmu_context(mm);
	set_context(mm->context, mm->pgd);
}
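/*
 * Illustrative sketch, not part of the original header: a typical
 * caller points current->mm at a new address space and then activates
 * it so the new mappings take effect.  The function name and 'new_mm'
 * are hypothetical.
 */
#if 0
void example_mm_switch(struct mm_struct *new_mm)	/* hypothetical */
{
	struct mm_struct *old_mm = current->mm;

	current->mm = new_mm;
	activate_mm(old_mm, new_mm);	/* pgdir + MMU context now follow new_mm */
}
#endif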
/*
 * compute the vsid from the context and segment
 * segments > 7 are kernel segments and their
 * vsid is the segment -- Cort
 */
#define VSID_FROM_CONTEXT(segment,context) \
	(((segment) < 8) ? ((segment) | ((context) << 4)) : (segment))
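/*
 * Worked example, illustrative only: user segment 3 of context 5 gets
 * vsid (3 | 5 << 4) == 0x53, while a kernel segment such as 12 keeps
 * vsid 12 regardless of context, so kernel mappings are shared by all
 * tasks as described at the top of the file.
 */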
#endif /* __PPC_MMU_CONTEXT_H */
#endif /* __KERNEL__ */