/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
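/*
 * Note: the secondary boot path (secondary_startup in
 * arch/arm/kernel/head.S) reads secondary_data before its MMU is up
 * to find the initial stack and page table base, which is why
 * __cpu_up() publishes the values here and clears them again once the
 * new CPU is online (or has timed out).
 */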
/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
        spinlock_t lock;
        unsigned long ipi_count;
        unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
        .lock   = SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};
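/*
 * Example: send_ipi_message(cpumask_of_cpu(1), IPI_TIMER) takes CPU1's
 * ipi_data.lock, sets bit IPI_TIMER in ipi_data.bits and triggers the
 * cross-call; do_IPI() on CPU1 then drains the bits under the same
 * lock and decodes them one message at a time.
 */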
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
        struct task_struct *idle = ci->idle;
        pgd_t *pgd;
        pmd_t *pmd;
        int ret;
        /*
         * Spawn a new process manually, if not already done.
         * Grab a pointer to its task struct so we can mess with it
         */
        if (!idle) {
                idle = fork_idle(cpu);
                if (IS_ERR(idle)) {
                        printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                        return PTR_ERR(idle);
                }
                ci->idle = idle;
        }
        /*
         * Allocate initial page tables to allow the new CPU to
         * enable the MMU safely.  This essentially means a set
         * of our "standard" page tables, with the addition of
         * a 1:1 mapping for the physical address of the kernel.
         */
        pgd = pgd_alloc(&init_mm);
        pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
        *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
                     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
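        /*
         * Note: the section descriptor above gives the kernel's physical
         * load address an identity (1:1) virtual mapping.  The secondary
         * CPU enables its MMU while still fetching instructions from
         * physical addresses, so without this mapping the first fetch
         * after the MMU is switched on would fault.
         */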
        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(pgd);
        wmb();
        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                unsigned long timeout;

                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                timeout = jiffies + HZ;
                while (time_before(jiffies, timeout)) {
                        if (cpu_online(cpu))
                                break;

                        udelay(10);
                        barrier();
                }

                if (!cpu_online(cpu))
                        ret = -EIO;
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        *pmd = __pmd(0);
        pgd_free(&init_mm, pgd);

        if (ret) {
                printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

                /*
                 * FIXME: We need to clean up the new idle thread. --rmk
                 */
        }

        return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuexit __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
        int ret;

        ret = mach_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        cpu_clear(cpu, cpu_online_map);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        local_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (p->mm)
                        cpu_clear(cpu, p->mm->cpu_vm_mask);
        }
        read_unlock(&tasklist_lock);

        return 0;
}
/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
}
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __cpuexit cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        local_irq_disable();
        idle_task_exit();

        /*
         * actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.
         */
        platform_cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
        __asm__("mov    sp, %0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
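/*
 * Note: task_stack_page(current) + THREAD_SIZE is the top of the idle
 * thread's stack; the 8-byte bias matches THREAD_START_SP (defined as
 * THREAD_SIZE - 8), so secondary_start_kernel() restarts on the same
 * initial stack pointer the CPU was given when it first booted.
 */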
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();
        printk("CPU%u: Booted secondary processor\n", cpu);

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpu_set(cpu, mm->cpu_vm_mask);
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        cpu_init();
        preempt_disable();
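        /*
         * Note: the cpu_switch_mm()/local_flush_tlb_all() sequence above
         * moves this CPU off the temporary 1:1 page tables set up by
         * __cpu_up() and onto the kernel's real tables (init_mm.pgd);
         * the TLB flush discards any entries fetched through the
         * temporary tables.
         */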
        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        /*
         * Enable local interrupts.
         */
        notify_cpu_starting(cpu);
        local_irq_enable();
        local_fiq_enable();

        /*
         * Setup local timer for this CPU.
         */
        local_timer_setup();
        calibrate_delay();
        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue
         */
        cpu_set(cpu, cpu_online_map);

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_idle();
}
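/*
 * Note: the cpu_set(cpu, cpu_online_map) above is what releases the
 * boot processor, which sits polling cpu_online(cpu) in __cpu_up().
 */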
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}
void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        per_cpu(cpu_data, cpu).idle = current;
}
static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
        unsigned long flags;
        unsigned int cpu;

        local_irq_save(flags);

        for_each_cpu_mask(cpu, callmap) {
                struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

                spin_lock(&ipi->lock);
                ipi->bits |= 1 << msg;
                spin_unlock(&ipi->lock);
        }

        /*
         * Call the platform specific cross-CPU call function.
         */
        smp_cross_call(callmap);

        local_irq_restore(flags);
}
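/*
 * Note: interrupts are disabled across send_ipi_message() so the
 * sending CPU cannot be preempted or migrated between marking the
 * per-CPU message bits and raising the cross-call interrupt with
 * smp_cross_call(); the bits and the interrupt stay paired.
 */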
void arch_send_call_function_ipi(cpumask_t mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}
void show_ipi_list(struct seq_file *p)
{
        unsigned int cpu;

        seq_puts(p, "IPI:");

        for_each_present_cpu(cpu)
                seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

        seq_putc(p, '\n');
}
void show_local_irqs(struct seq_file *p)
{
        unsigned int cpu;

        seq_printf(p, "LOC: ");

        for_each_present_cpu(cpu)
                seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

        seq_putc(p, '\n');
}
static void ipi_timer(void)
{
        irq_enter();
        local_timer_interrupt();
        irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        int cpu = smp_processor_id();

        if (local_timer_ack()) {
                irq_stat[cpu].local_timer_irqs++;
                ipi_timer();
        }

        set_irq_regs(old_regs);
}
#endif
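/*
 * Note: ipi_timer() wraps local_timer_interrupt() in irq_enter()/
 * irq_exit() because these handlers are entered directly from the
 * low-level exception path, so interrupt-context accounting (and
 * softirq processing on exit) must be done explicitly here.
 */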
static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        spin_lock(&stop_lock);
        printk(KERN_CRIT "CPU%u: stopping\n", cpu);
        dump_stack();
        spin_unlock(&stop_lock);

        cpu_clear(cpu, cpu_online_map);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}
/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
        struct pt_regs *old_regs = set_irq_regs(regs);

        ipi->ipi_count++;

        for (;;) {
                unsigned long msgs;

                spin_lock(&ipi->lock);
                msgs = ipi->bits;
                ipi->bits = 0;
                spin_unlock(&ipi->lock);

                if (!msgs)
                        break;

                do {
                        unsigned nextmsg;
                        nextmsg = msgs & -msgs;
                        msgs &= ~nextmsg;
                        nextmsg = ffz(~nextmsg);
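                        /*
                         * The steps above pull one message out of the
                         * pending set: msgs & -msgs isolates the lowest
                         * set bit (two's-complement trick), and
                         * ffz(~nextmsg) converts that single bit into
                         * its index, i.e. the ipi_msg_type number
                         * dispatched on below.
                         */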
                        switch (nextmsg) {
                        case IPI_TIMER:
                                ipi_timer();
                                break;
                        case IPI_RESCHEDULE:
                                /*
                                 * nothing more to do - everything is
                                 * done on the interrupt return path
                                 */
                                break;
                        case IPI_CALL_FUNC:
                                generic_smp_call_function_interrupt();
                                break;
                        case IPI_CALL_FUNC_SINGLE:
                                generic_smp_call_function_single_interrupt();
                                break;
                        case IPI_CPU_STOP:
                                ipi_cpu_stop(cpu);
                                break;
                        default:
                                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                                       cpu, nextmsg);
                                break;
                        }
                } while (msgs);
        }

        set_irq_regs(old_regs);
}
void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}
void smp_send_timer(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        send_ipi_message(mask, IPI_TIMER);
}
void smp_timer_broadcast(cpumask_t mask)
{
        send_ipi_message(mask, IPI_TIMER);
}
void smp_send_stop(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        send_ipi_message(mask, IPI_CPU_STOP);
}
/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
static int
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
                 cpumask_t mask)
{
        int ret = 0;

        preempt_disable();

        ret = smp_call_function_mask(mask, func, info, wait);
        if (cpu_isset(smp_processor_id(), mask))
                func(info);

        preempt_enable();

        return ret;
}
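/*
 * Note: smp_call_function_mask() only runs func on *other* CPUs, so if
 * the calling CPU is itself in the mask it must invoke func directly,
 * under preempt_disable() to keep smp_processor_id() stable.
 */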
/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};
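/*
 * The IPI flush handlers below each receive a single void * argument,
 * so callers bundle the vma/start/end parameters into a tlb_args on
 * their stack and pass its address.
 */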
static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
void flush_tlb_all(void)
{
        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}
void flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t mask = mm->cpu_vm_mask;

        on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        cpumask_t mask = vma->vm_mm->cpu_vm_mask;
        struct tlb_args ta;

        ta.ta_vma = vma;
        ta.ta_start = uaddr;

        on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
}
void flush_tlb_kernel_page(unsigned long kaddr)
{
        struct tlb_args ta;

        ta.ta_start = kaddr;

        on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
}
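/*
 * Note: passing the address of a stack-local tlb_args to other CPUs is
 * safe only because the wait argument to on_each_cpu() and
 * on_each_cpu_mask() is 1: the caller blocks until every CPU has run
 * the handler, so the structure cannot go out of scope while in use.
 */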
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        cpumask_t mask = vma->vm_mm->cpu_vm_mask;
        struct tlb_args ta;

        ta.ta_vma = vma;
        ta.ta_start = start;
        ta.ta_end = end;

        on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct tlb_args ta;

        ta.ta_start = start;
        ta.ta_end = end;

        on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}