[davej-history.git] / arch/mips64/kernel/smp.c

#include <linux/config.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>

#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_SGI_IP27

#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/sn0/ip27.h>
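
/*
 * Message types for sendintr(): they select which per-CPU software
 * interrupt is raised on the destination CPU's hub.
 */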
#define DORESCHED 0xab
#define DOCALL    0xbc

#define IRQ_TO_SWLEVEL(i) ((i) + 7)	/* Delete this from here */
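
/*
 * Deliver a DORESCHED or DOCALL interrupt to CPU <destid> by setting the
 * corresponding bit in the interrupt register of the hub on that CPU's node.
 */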
static void sendintr(int destid, unsigned char status)
{
        int irq;

#if (CPUS_PER_NODE == 2)
        switch (status) {
        case DORESCHED: irq = CPU_RESCHED_A_IRQ; break;
        case DOCALL:    irq = CPU_CALL_A_IRQ; break;
        default:        panic("sendintr");
        }
        irq += cputoslice(destid);

        /*
         * Convert the compact hub number to the NASID to get the correct
         * part of the address space. Then set the interrupt bit associated
         * with the CPU we want to send the interrupt to.
         */
        REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cputocnode(destid)),
                             IRQ_TO_SWLEVEL(irq));
#else
        << Bomb!  Must redefine this for more than 2 CPUS. >>
#endif
}

#endif /* CONFIG_SGI_IP27 */

/* The 'big kernel lock' */
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
int smp_threads_ready;		/* Not used */
atomic_t smp_commenced = ATOMIC_INIT(0);
struct cpuinfo_mips cpu_data[NR_CPUS];
int smp_num_cpus;		/* Number that came online.  */
int __cpu_number_map[NR_CPUS];
int __cpu_logical_map[NR_CPUS];
cycles_t cacheflush_time;
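
/* Placeholder: nothing is tuned here yet, so cacheflush_time stays at zero. */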
static void smp_tune_scheduling (void)
{
}

void __init smp_boot_cpus(void)
{
        extern void allowboot(void);

        init_new_context(current, &init_mm);
        current->processor = 0;
        init_idle();
        smp_tune_scheduling();
        allowboot();
}
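
/*
 * Publish the "go" flag for the secondary CPUs: make prior initialisation
 * visible first (wmb), then set smp_commenced.
 */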
void __init smp_commence(void)
{
        wmb();
        atomic_set(&smp_commenced, 1);
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU
         */
        for (;;);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
        smp_num_cpus = 1;
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        sendintr(cpu, DORESCHED);
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or are or have executed.
 */
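
/*
 * call_data points at the caller's on-stack descriptor; it is only valid
 * while smp_call_function() holds its internal lock.
 */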
static volatile struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
} *call_data;

int smp_call_function (void (*func) (void *info), void *info, int retry,
                       int wait)
{
        struct call_data_struct data;
        int i, cpus = smp_num_cpus - 1;
        static spinlock_t lock = SPIN_LOCK_UNLOCKED;

        if (cpus == 0)
                return 0;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock_bh(&lock);
        call_data = &data;
        /* Send a message to all other CPUs and wait for them to respond */
        for (i = 0; i < smp_num_cpus; i++)
                if (smp_processor_id() != i)
                        sendintr(i, DOCALL);

        /* Wait for response */
        /* FIXME: lock-up detection, backtrace on lock-up */
        while (atomic_read(&data.started) != cpus)
                barrier();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
        spin_unlock_bh(&lock);
        return 0;
}
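
/*
 * Runs on each target CPU when the DOCALL interrupt arrives: pick up the
 * function published through call_data and execute it.
 */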
extern void smp_call_function_interrupt(int irq, void *d, struct pt_regs *r)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function.
         */
        atomic_inc(&call_data->started);

        /*
         * At this point the info structure may be out of scope unless wait==1.
         */
        (*func)(info);
        if (wait)
                atomic_inc(&call_data->finished);
}
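
/*
 * TLB flush across all CPUs: flush_tlb_all() runs _flush_tlb_all() on the
 * other CPUs via smp_call_function() and then flushes the local TLB.
 */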
static void flush_tlb_all_ipi(void *info)
{
        _flush_tlb_all();
}

void flush_tlb_all(void)
{
        smp_call_function(flush_tlb_all_ipi, 0, 1, 1);
        _flush_tlb_all();
}

static void flush_tlb_mm_ipi(void *mm)
{
        _flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
        } else {
                int i;
                for (i = 0; i < smp_num_cpus; i++)
                        if (smp_processor_id() != i)
                                CPU_CONTEXT(i, mm) = 0;
        }
        _flush_tlb_mm(mm);
}
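
/*
 * Argument block handed (by pointer) through smp_call_function() to the
 * range and page flush IPI handlers below.
 */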
struct flush_tlb_data {
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        _flush_tlb_range(fd->mm, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.mm = mm;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
        } else {
                int i;
                for (i = 0; i < smp_num_cpus; i++)
                        if (smp_processor_id() != i)
                                CPU_CONTEXT(i, mm) = 0;
        }
        _flush_tlb_range(mm, start, end);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        _flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
        } else {
                int i;
                for (i = 0; i < smp_num_cpus; i++)
                        if (smp_processor_id() != i)
                                CPU_CONTEXT(i, vma->vm_mm) = 0;
        }
        _flush_tlb_page(vma, page);
}