/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 *
 * This does not handle HOTPLUG_CPU yet.
 */

#include <linux/sched.h>
#include <linux/err.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "mmu.h"
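
/*
 * Mask of vcpus whose initial context has already been handed to the
 * hypervisor (see cpu_initialize_context()), plus per-cpu IRQ numbers
 * for the IPI/VIRQ event channels bound in xen_smp_intr_init().
 * debug_irq starts at -1 so the error path can tell it was never bound.
 */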
static cpumask_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
static DEFINE_PER_CPU(int, callfuncsingle_irq);
static DEFINE_PER_CPU(int, debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
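
/*
 * First code run by a freshly created vcpu: cpu_initialize_context()
 * points the new vcpu's eip here.  Finish per-cpu setup, mark the cpu
 * online, enable interrupts and drop into the idle loop.
 */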
static __cpuinit void cpu_bringup_and_idle(void)
{
	int cpu = smp_processor_id();

	cpu_init();
	xen_enable_sysenter();

	preempt_disable();
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	xen_setup_cpu_clockevents();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
	cpu_idle();
}
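
/*
 * Bind this cpu's IPI event channels (reschedule, call-function,
 * call-function-single) and the VIRQ_DEBUG virq to IRQ handlers,
 * recording the resulting irq numbers so they can be unbound on error.
 */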
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfuncsingle_irq, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	if (per_cpu(debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);

	return rc;
}
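
/*
 * Ask the hypervisor which vcpu ids this domain has: VCPUOP_is_up
 * fails for ids the domain doesn't own, so every id for which it
 * returns a non-negative value is marked possible.
 */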
void __init xen_fill_possible_map(void)
{
	int i, rc;

	for (i = 0; i < NR_CPUS; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0)
			cpu_set(i, cpu_possible_map);
	}
}

void __init xen_smp_prepare_boot_cpu(void)
{
	int cpu;

	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(&per_cpu__gdt_page);

	for_each_possible_cpu(cpu) {
		cpus_clear(per_cpu(cpu_sibling_map, cpu));
		/*
		 * cpu_core_map lives in a per cpu area that is cleared
		 * when the per cpu array is allocated.
		 *
		 * cpus_clear(per_cpu(cpu_core_map, cpu));
		 */
	}

	xen_setup_vcpu_info_placement();
}
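
/*
 * One-time SMP setup, run on the boot cpu: record cpu 0's info and
 * bind its IPIs, trim cpu_possible_map down to max_cpus, and fork an
 * idle task for every other possible cpu.
 */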
void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	for_each_possible_cpu(cpu) {
		cpus_clear(per_cpu(cpu_sibling_map, cpu));
		/*
		 * cpu_core_map will be zeroed when the per
		 * cpu area is allocated.
		 *
		 * cpus_clear(per_cpu(cpu_core_map, cpu));
		 */
	}

	smp_store_cpu_info(0);
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	xen_cpu_initialized_map = cpumask_of_cpu(0);

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		cpu_set(cpu, cpu_present_map);
	}

	//init_xenbus_allowed_cpumask();
}
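
/*
 * Build the initial register state, GDT location, event callbacks and
 * page-table base for @cpu and hand them to the hypervisor with
 * VCPUOP_initialise.  Runs at most once per cpu; later calls are no-ops.
 */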
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct gdt_page *gdt = &per_cpu(gdt_page, cpu);

	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = 0;
	ctxt->user_regs.ss = __KERNEL_DS;
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
	make_lowmem_page_readonly(gdt->gdt);

	ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
	ctxt->gdt_ents      = ARRAY_SIZE(gdt->gdt);

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
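
/*
 * Boot a secondary cpu: set up its idle task, timer and initial
 * context, bind its IPIs, then kick the vcpu off with VCPUOP_up.
 */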
int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#if 0
	rc = cpu_up_check(cpu);
	if (rc)
		return rc;
#endif

	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
	xen_setup_timer(cpu);

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	smp_store_cpu_info(cpu);
	set_cpu_sibling_map(cpu);
	/* This must be done before setting cpu_online_map */
	wmb();

	cpu_set(cpu, cpu_online_map);

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	return 0;
}

void xen_smp_cpus_done(unsigned int max_cpus)
{
}

static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}
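
/*
 * Take the system down: make every other cpu run stop_self() via the
 * call-function IPI so each vcpu parks itself in the hypervisor.
 */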
void xen_smp_send_stop(void)
{
	smp_call_function(stop_self, NULL, 0, 0);
}

void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
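
/*
 * Deliver @vector to every online cpu in @mask by signalling each
 * cpu's IPI event channel in turn.
 */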
static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
{
	unsigned cpu;

	cpus_and(mask, mask, cpu_online_map);

	for_each_cpu_mask(cpu, mask)
		xen_send_IPI_one(cpu, vector);
}

void xen_smp_send_call_function_ipi(cpumask_t mask)
{
	int cpu;

	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu_mask(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
			break;
		}
	}
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
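
/*
 * Upcalls for the two call-function IPIs: run the generic SMP helpers
 * inside irq_enter()/irq_exit() and account the interrupt in irq_stat.
 */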
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	__get_cpu_var(irq_stat).irq_call_count++;
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	__get_cpu_var(irq_stat).irq_call_count++;
	irq_exit();

	return IRQ_HANDLED;
}