/*
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 *
 * This does not handle HOTPLUG_CPU yet.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "mmu.h"
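
/* Set of VCPUs whose initial context has already been handed to Xen. */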
cpumask_t xen_cpu_initialized_map;
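
/*
 * Per-VCPU Linux irq numbers for the IPI and debug-VIRQ event
 * channels, bound in xen_smp_intr_init().  debug_irq starts at -1 so
 * the failure path can tell that it was never bound.
 */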
static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
static DEFINE_PER_CPU(int, callfuncsingle_irq);
static DEFINE_PER_CPU(int, debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.  Nothing to do, all the work is done
 * automatically when we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_resched_count++;
#else
	add_pda(irq_resched_count, 1);
#endif

	return IRQ_HANDLED;
}
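
/*
 * First code run by a freshly started VCPU: Xen enters the new CPU
 * here (see ctxt->user_regs.eip in cpu_initialize_context()), so this
 * must finish per-cpu setup, mark the CPU online and drop into the
 * idle loop.
 */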
static __cpuinit void cpu_bringup_and_idle(void)
{
	int cpu = smp_processor_id();

	cpu_init();
	preempt_disable();

	xen_enable_sysenter();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	cpu_set(cpu, cpu_online_map);
	x86_write_percpu(cpu_state, CPU_ONLINE);
	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
	cpu_idle();
}
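
/*
 * Bind this CPU's IPI vectors and the debug VIRQ to event channels
 * and attach their handlers.  On any failure, unbind whatever has
 * already been set up and return the error.
 */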
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfuncsingle_irq, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	if (per_cpu(debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);

	return rc;
}
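
/*
 * Probe which VCPU numbers exist in this domain.  VCPUOP_is_up
 * returns a non-negative value for any VCPU the domain was built
 * with, whether or not it is currently running, so each such VCPU is
 * marked possible.
 */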
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	for (i = 0; i < NR_CPUS; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0)
			cpu_set(i, cpu_possible_map);
	}
}

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(&per_cpu_var(gdt_page));

	xen_setup_vcpu_info_placement();
}
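
/*
 * Record the boot CPU's info, bind its IPIs, trim cpu_possible_map
 * down to max_cpus, and fork an idle task for every CPU we may bring
 * up later.
 */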
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	xen_cpu_initialized_map = cpumask_of_cpu(0);

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		cpu_set(cpu, cpu_present_map);
	}

	//init_xenbus_allowed_cpumask();
}
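
/*
 * Build the initial register, segment and page-table state for a new
 * VCPU and hand it to Xen with VCPUOP_initialise.  This replaces the
 * real-mode trampoline used to start an AP on native hardware.
 */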
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;

	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
	make_lowmem_page_readonly(gdt);

	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;

	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
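
/*
 * Bring one secondary CPU online: point the per-cpu state at its idle
 * task, load the initial context into Xen, bind the CPU's event
 * channels, kick the VCPU with VCPUOP_up, then wait for it to mark
 * itself CPU_ONLINE.
 */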
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#if 0
	rc = cpu_up_check(cpu);
	if (rc)
		return rc;
#endif

#ifdef CONFIG_X86_64
	/* Allocate node local memory for AP pdas */
	rc = get_local_pda(cpu);
	if (rc)
		return rc;
#endif

#ifdef CONFIG_X86_32
	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = idle;
	clear_tsk_thread_flag(idle, TIF_FORK);
#endif
	xen_setup_timer(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
		barrier();
	}

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}
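
/*
 * Runs on each CPU being shut down: switch to the reference page
 * tables so nothing per-process stays pinned, then ask Xen to take
 * this VCPU down.
 */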
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_smp_send_stop(void)
{
	smp_call_function(stop_self, NULL, 0);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
{
	unsigned cpu;

	cpus_and(mask, mask, cpu_online_map);

	for_each_cpu_mask(cpu, mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(cpumask_t mask)
{
	int cpu;

	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu_mask(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of_cpu(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	return IRQ_HANDLED;
}
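
/*
 * The paravirt SMP entry points; installed as the global smp_ops by
 * xen_smp_init() below.
 */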
static const struct smp_ops xen_smp_ops __initdata = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.cpu_up = xen_cpu_up,
	.smp_cpus_done = xen_smp_cpus_done,

	.smp_send_stop = xen_smp_send_stop,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
}