/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
/*
 * bitmask of present and online CPUs.
 * The present bitmask indicates that the CPU is physically present.
 * The online bitmask indicates that the CPU is up and running.
 */
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
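/*
 * Each message type occupies one bit of ipi_data.bits: the sender
 * sets the bit under ipi->lock and raises a cross-call, and the
 * receiving CPU's do_IPI() drains the whole word at once, so two
 * identical messages sent back-to-back may collapse into a single
 * delivery.
 */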
struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t unfinished;
};

static struct smp_call_struct * volatile smp_call_function_data;
static DEFINE_SPINLOCK(smp_call_function_lock);
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}
	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	pmd = pmd_offset(pgd, PHYS_OFFSET);
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
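	/*
	 * The 1:1 section mapping matters because the secondary core
	 * enables the MMU while still executing from the kernel's
	 * physical address; without it, the first instruction fetch
	 * after translation is switched on would fault.
	 */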
	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	wmb();
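	/*
	 * The secondary's head code reads secondary_data to find its
	 * stack and page tables, typically after being released from
	 * a platform-specific holding pen, so both fields must be
	 * visible in memory before boot_secondary() lets it run.
	 */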
	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
	pgd_free(pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mach_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	cpu_clear(cpu, cpu_online_map);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpu_clear(cpu, p->mm->cpu_vm_mask);
	}
	read_unlock(&tasklist_lock);

	return 0;
}
/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __cpuexit cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
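	/*
	 * THREAD_SIZE - 8 points sp at the top of the idle thread's
	 * stack again (less a small scratch margin), i.e. the same
	 * value it had when secondary_start_kernel() first ran here.
	 */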
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);
	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpu_set(cpu, mm->cpu_vm_mask);
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
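	/*
	 * cpu_switch_mm() above moves this CPU off the temporary page
	 * tables built by __cpu_up() and onto init_mm's real ones;
	 * the TLB flush discards any entries from the boot tables.
	 */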
	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	local_irq_enable();
	local_fiq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	cpu_set(cpu, cpu_online_map);

	/*
	 * Setup local timer for this CPU.
	 */
	local_timer_setup(cpu);
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
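	/*
	 * One BogoMIPS is 500000 loops per second, so dividing the
	 * summed loops_per_jiffy by 500000/HZ gives the integer part
	 * and dividing by 5000/HZ modulo 100 the two decimal places.
	 */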
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}
static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu_mask(cpu, callmap) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}
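	/*
	 * The message bits are published under ipi->lock before the
	 * cross-call is raised, so each target's do_IPI() is
	 * guaranteed to observe them when it drains ipi->bits.
	 */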
	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(callmap);

	local_irq_restore(flags);
}
/*
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, nor from a bottom half handler.
 */
static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
				    int retry, int wait, cpumask_t callmap)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int ret = 0;

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		goto out;

	data.pending = callmap;
	if (wait)
		data.unfinished = callmap;
	/*
	 * try to get the mutex on smp_call_function_data
	 */
	spin_lock(&smp_call_function_lock);
	smp_call_function_data = &data;

	send_ipi_message(callmap, IPI_CALL_FUNC);

	timeout = jiffies + HZ;
	while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
		barrier();
	/*
	 * did we time out?
	 */
	if (!cpus_empty(data.pending)) {
		/*
		 * this may be causing our panic - report it
		 */
		printk(KERN_CRIT
		       "CPU%u: smp_call_function timeout for %p(%p)\n"
		       "      callmap %lx pending %lx, %swait\n",
		       smp_processor_id(), func, info, *cpus_addr(callmap),
		       *cpus_addr(data.pending), wait ? "" : "no ");

		timeout = jiffies + (5 * HZ);
		while (!cpus_empty(data.pending) &&
		       time_before(jiffies, timeout))
			barrier();

		if (cpus_empty(data.pending))
			printk(KERN_CRIT "      RESOLVED\n");
		else
			printk(KERN_CRIT "      STILL STUCK\n");
	}
	/*
	 * whatever happened, we're done with the data, so release it
	 */
	smp_call_function_data = NULL;
	spin_unlock(&smp_call_function_lock);

	if (!cpus_empty(data.pending)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	if (wait)
		while (!cpus_empty(data.unfinished))
			barrier();
 out:
	return ret;
}
int smp_call_function(void (*func)(void *info), void *info, int retry,
		      int wait)
{
	return smp_call_function_on_cpu(func, info, retry, wait,
					cpu_online_map);
}
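/*
 * For illustration, a caller that wants some action performed on
 * every other online CPU and needs to wait for completion would do
 * something like:
 *
 *	smp_call_function(do_something, arg, 1, 1);
 *	do_something(arg);
 *
 * where do_something is any void (*)(void *) handler of the caller's
 * choosing; note the caller must invoke the handler itself if the
 * effect is also wanted locally.
 */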
void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}
void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_printf(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}
static void ipi_timer(struct pt_regs *regs)
{
	int user = user_mode(regs);

	irq_enter();
	profile_tick(CPU_PROFILING, regs);
	update_process_times(user);
	irq_exit();
}
#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void do_local_timer(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer(regs);
	}
}
#endif
/*
 * ipi_call_function - handle IPI from smp_call_function()
 *
 * Note that we copy data out of the cross-call structure and then
 * let the caller know that we're here and have done with their data
 */
static void ipi_call_function(unsigned int cpu)
{
	struct smp_call_struct *data = smp_call_function_data;
	void (*func)(void *info) = data->func;
	void *info = data->info;
	int wait = data->wait;

	cpu_clear(cpu, data->pending);

	func(info);
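	/*
	 * Clearing "pending" above told the sender that this CPU has
	 * copied the call data; clearing "unfinished" below further
	 * tells a wait=1 caller that func() has completed here.
	 */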
	if (wait)
		cpu_clear(cpu, data->unfinished);
}
static DEFINE_SPINLOCK(stop_lock);
/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}
/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

	ipi->ipi_count++;
	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;
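		/*
		 * msgs & -msgs isolates the lowest set bit of the
		 * message word, and ffz(~bit) turns that one-hot value
		 * back into a bit number, so pending messages are
		 * handled lowest-numbered first.
		 */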
		do {
			unsigned nextmsg;

			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);
			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer(regs);
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				ipi_call_function(cpu);
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}
}
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}
void smp_send_timer(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_TIMER);
}
void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_CPU_STOP);
}
/*
 * not supported here
 */
int __init setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
static int
on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
		 cpumask_t mask)
{
	int ret = 0;

	preempt_disable();

	ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
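	/*
	 * smp_call_function_on_cpu() always strips the calling CPU
	 * from the map, so when the mask covers this CPU the function
	 * is simply run directly here, under the same
	 * preempt_disable().
	 */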
	if (cpu_isset(smp_processor_id(), mask))
		func(info);

	preempt_enable();

	return ret;
}
/**********************************************************************/
/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}
static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}
static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}
static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}
static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
}
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t mask = mm->cpu_vm_mask;
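	/*
	 * mm->cpu_vm_mask restricts the flush IPI to CPUs which have
	 * actually used this mm; the page and range flushes below use
	 * the same trick to avoid interrupting uninvolved CPUs.
	 */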
	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = uaddr;

	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
}
void flush_tlb_kernel_page(unsigned long kaddr)
{
	struct tlb_args ta;

	ta.ta_start = kaddr;

	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
}
void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta;

	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
}