/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
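
/*
 * Register the platform's SMP operations; any previously registered
 * set is overridden with a warning.
 */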
void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}
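
/*
 * Early SMP preparation on the boot CPU: initialize the boot MMU
 * context and let the platform code ready the secondaries for bringup.
 */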
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;

	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}
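
/*
 * CPU hotplug support. With CONFIG_HOTPLUG_CPU the native_* helpers
 * below implement taking a CPU offline; without it they refuse or BUG.
 */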
#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__get_cpu_var(cpu_state) = CPU_DEAD;
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p)
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	read_unlock(&tasklist_lock);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif
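
/*
 * First C code run on a secondary CPU: enable the MMU, adopt init_mm,
 * set up traps and the local timer, then mark the CPU online and enter
 * the idle loop.
 */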
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_idle();
}

extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;
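
/*
 * Bring up a single secondary CPU: hand it an idle task and its entry
 * data via the stack_start block consumed by head.S, start it through
 * the platform ops, and wait up to a second for it to come online.
 */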
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = cpu_data[cpu].idle;
	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk)) {
			pr_err("Failed forking idle task for cpu %d\n", cpu);
			return PTR_ERR(tsk);
		}

		cpu_data[cpu].idle = tsk;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}
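
/*
 * Demultiplex an incoming IPI and dispatch it to the generic SMP or
 * local timer handlers.
 */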
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
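
/*
 * Cross-CPU TLB flushing: the *_ipi() helpers below run on each target
 * CPU and perform the corresponding local flush.
 */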
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
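
/*
 * Argument block passed to the TLB flush IPI handlers; addr1/addr2
 * carry a range, a single page address, or an ASID/vaddr pair
 * depending on the caller.
 */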
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}
static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}
static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}