/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/init.h>

#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>
/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN_ON(1);
		return;
	}
	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
}
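/*
 * A minimal sketch of how this is reached, assuming the <asm/smp.h>
 * wrapper of this kernel generation: generic code calls
 * smp_send_reschedule(), which dispatches through the smp_ops table
 * defined at the bottom of this file:
 *
 *	static inline void smp_send_reschedule(int cpu)
 *	{
 *		smp_ops.smp_send_reschedule(cpu);
 *	}
 */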
void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}
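/*
 * Caller-side sketch (hypothetical helpers, for illustration only): the
 * vector above backs smp_call_function_single() from <linux/smp.h>. The
 * callback runs in hardirq context on the target CPU, so it must not
 * sleep or take sleeping locks.
 */
static void __maybe_unused example_remote_hello(void *info)
{
	/* Runs on the target CPU, in interrupt context. */
	pr_info("hello from CPU %d\n", smp_processor_id());
}

static void __maybe_unused example_single_ipi_usage(void)
{
	/* From process context: run the helper on CPU 1 and wait for it. */
	smp_call_function_single(1, example_remote_hello, NULL, 1);
}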
void native_send_call_func_ipi(const struct cpumask *mask)
{
	cpumask_var_t allbutself;

	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
		return;
	}

	cpumask_copy(allbutself, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), allbutself);

	if (cpumask_equal(mask, allbutself) &&
	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	free_cpumask_var(allbutself);
}
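/*
 * Caller-side sketch (hypothetical helpers, for illustration only): the
 * mask variant above ultimately backs smp_call_function_many() and
 * smp_call_function(), e.g. running a function once on every other
 * online CPU and waiting for all of them to finish:
 */
static atomic_t example_acks = ATOMIC_INIT(0);

static void __maybe_unused example_count_ack(void *info)
{
	/* Hardirq context on each targeted CPU: no sleeping. */
	atomic_inc(&example_acks);
}

static void __maybe_unused example_ipi_all_usage(void)
{
	/* IPI all other online CPUs and wait until each has run it. */
	smp_call_function(example_count_ack, NULL, 1);
	pr_info("%d other CPUs responded\n", atomic_read(&example_acks));
}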
/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */

asmlinkage void smp_reboot_interrupt(void)
{
	ack_APIC_irq();
	irq_enter();
	stop_this_cpu(NULL);
	irq_exit();
}
static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	/*
	 * Use our own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 * On most systems we could also use an NMI here,
	 * but there are a few systems around where NMI
	 * is problematic, so stay with a non-NMI approach for now
	 * (this implies we cannot stop CPUs spinning with irqs off
	 * currently).
	 */
	if (num_online_cpus() > 1) {
		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}
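/*
 * A minimal sketch of the callers, assuming the <asm/smp.h> wrappers of
 * this kernel generation: panic() uses the non-waiting form, while the
 * reboot path waits for the other CPUs to really stop:
 *
 *	static inline void smp_send_stop(void)
 *	{
 *		smp_ops.stop_other_cpus(0);
 *	}
 *
 *	static inline void stop_other_cpus(void)
 *	{
 *		smp_ops.stop_other_cpus(1);
 *	}
 */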
/*
 * Reschedule callback. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	inc_irq_stat(irq_resched_count);
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}
void smp_call_function_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();
}
void smp_call_function_single_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();
}
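/*
 * A sketch of how these handlers are attached to their vectors at boot,
 * assuming the gate setup in arch/x86/kernel/irqinit.c of this era:
 *
 *	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 *	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 *	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
 *			call_function_single_interrupt);
 *
 * The *_interrupt symbols are the low-level entry stubs that save
 * registers and then call the smp_*_interrupt() handlers above.
 */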
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
	.smp_prepare_cpus	= native_smp_prepare_cpus,
	.smp_cpus_done		= native_smp_cpus_done,

	.stop_other_cpus	= native_stop_other_cpus,
	.smp_send_reschedule	= native_smp_send_reschedule,

	.cpu_up			= native_cpu_up,
	.cpu_die		= native_cpu_die,
	.cpu_disable		= native_cpu_disable,
	.play_dead		= native_play_dead,

	.send_call_func_ipi	= native_send_call_func_ipi,
	.send_call_func_single_ipi = native_send_call_func_single_ipi,
};
EXPORT_SYMBOL_GPL(smp_ops);
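/*
 * The native table above can be replaced wholesale by alternative
 * implementations. A sketch, assuming the Xen guest bring-up of this era
 * (arch/x86/xen/smp.c), which installs its own callbacks before SMP init:
 *
 *	smp_ops = xen_smp_ops;
 */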