ARM: 6943/1: mm: use TTBR1 instead of reserved context ID

/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static DEFINE_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif
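
/*
 * mm->context.id packs a generation number into its upper bits and the
 * hardware ASID into its low ASID_BITS (eight bits on ARMv6/v7, see
 * asm/mmu_context.h), so comparing the high bits with cpu_last_asid
 * tells whether an mm's ASID belongs to the current generation.
 */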

/*
 * We fork()ed a process, and we need a new context for the child
 * to run in.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        mm->context.id = 0;
        spin_lock_init(&mm->context.id_lock);
}
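
/*
 * An id of zero never matches the current generation (cpu_last_asid
 * starts at ASID_FIRST_VERSION), so a freshly initialised mm is
 * guaranteed to go through __new_context() before it first runs.
 */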

static void flush_context(void)
{
        u32 ttb;
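
        /*
         * TTBR1 points at swapper_pg_dir, which contains only global
         * (non ASID-tagged) mappings, so lending it to TTBR0 while the
         * TLB is flushed avoids the reserved context ID this code used
         * previously (hence the commit summary above).
         */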
        /* Copy TTBR1 into TTBR0 */
        asm volatile("mrc       p15, 0, %0, c2, c0, 1\n"
                     "mcr       p15, 0, %0, c2, c0, 0"
                     : "=r" (ttb));
        isb();
        local_flush_tlb_all();
        if (icache_is_vivt_asid_tagged()) {
                /*
                 * ASID-tagged VIVT I-cache lines are keyed by the ASIDs
                 * being recycled, so they must be flushed as well.
                 */
                __flush_icache_all();
                dsb();
        }
}

#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
        unsigned long flags;

        /*
         * Locking needed for multi-threaded applications where the
         * same mm->context.id could be set from different CPUs during
         * the broadcast. This function is also called via IPI so the
         * mm->context.id_lock has to be IRQ-safe.
         */
        spin_lock_irqsave(&mm->context.id_lock, flags);
        if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
                /*
                 * Old version of ASID found. Set the new one and
                 * reset mm_cpumask(mm).
                 */
                mm->context.id = asid;
                cpumask_clear(mm_cpumask(mm));
        }
        spin_unlock_irqrestore(&mm->context.id_lock, flags);

        /*
         * Set the mm_cpumask(mm) bit for the current CPU.
         */
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
        unsigned int asid;
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = per_cpu(current_mm, cpu);

        /*
         * Check if a current_mm was set on this CPU as it might still
         * be in the early booting stages and using the reserved ASID.
         */
        if (!mm)
                return;
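
        /*
         * The smp_rmb() pairs with the smp_wmb() in __new_context(): it
         * makes the rollover CPU's update of cpu_last_asid visible here
         * before this CPU derives its own ASID, the distinct value
         * cpu_last_asid + cpu + 1 in the new generation.
         */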
        smp_rmb();
        asid = cpu_last_asid + cpu + 1;

        flush_context();
        set_mm_context(mm, asid);

        /* set the new ASID */
        asm("mcr        p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
        isb();
}

#else

static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
        mm->context.id = asid;
        cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif
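
/*
 * Allocate a new ASID for "mm": reached (via check_context()) once the
 * generation in mm->context.id no longer matches cpu_last_asid.
 */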
void __new_context(struct mm_struct *mm)
{
        unsigned int asid;

        spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
        /*
         * Check the ASID again, in case the change was broadcast from
         * another CPU before we acquired the lock.
         */
        if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
                spin_unlock(&cpu_asid_lock);
                return;
        }
#endif
        /*
         * At this point, it is guaranteed that the current mm (with
         * an old ASID) isn't active on any other CPU since the ASIDs
         * are changed simultaneously via IPI.
         */
        asid = ++cpu_last_asid;
        /* The 32-bit generation counter wrapped: restart at version one. */
        if (asid == 0)
                asid = cpu_last_asid = ASID_FIRST_VERSION;

        /*
         * If we've used up all our ASIDs, we need
         * to start a new version and flush the TLB.
         */
        if (unlikely((asid & ~ASID_MASK) == 0)) {
                asid = cpu_last_asid + smp_processor_id() + 1;
                flush_context();
#ifdef CONFIG_SMP
                smp_wmb();
                smp_call_function(reset_context, NULL, 1);
#endif
                /*
                 * Reserve one ASID per CPU in the new generation; the
                 * IPI'd CPUs pick theirs up in reset_context().
                 */
                cpu_last_asid += NR_CPUS;
        }

        set_mm_context(mm, asid);
        spin_unlock(&cpu_asid_lock);
}
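
For reference, the allocation path above is driven from the context-switch
code. A rough sketch of the caller, paraphrased from this era's
arch/arm/include/asm/mmu_context.h (not verbatim; details may differ):

        /* Grab a fresh ASID if this mm's generation is stale. */
        static inline void check_context(struct mm_struct *mm)
        {
                if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
                        __new_context(mm);
        }

switch_mm() calls check_context() for the incoming mm, after which
cpu_switch_mm() programs CONTEXTIDR with mm->context.id and TTBR0 with
the new page tables.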