#include "qemu/osdep.h"
#include "migration/vmstate.h"
#include "hw/acpi/cpu.h"
#include "qapi/error.h"
#include "qapi/qapi-events-acpi.h"
#include "trace.h"
#include "sysemu/numa.h"

#define ACPI_CPU_HOTPLUG_REG_LEN 12
#define ACPI_CPU_SELECTOR_OFFSET_WR 0
#define ACPI_CPU_FLAGS_OFFSET_RW 4
#define ACPI_CPU_CMD_OFFSET_WR 5
#define ACPI_CPU_CMD_DATA_OFFSET_RW 8
#define ACPI_CPU_CMD_DATA2_OFFSET_R 0

#define OVMF_CPUHP_SMI_CMD 4
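/*
 * Guest-visible register block (ACPI_CPU_HOTPLUG_REG_LEN bytes of system
 * I/O), as consumed by the AML generated in build_cpus_aml() below:
 *   0x0  DWORD  (write) CPU selector / (read) command data 2
 *   0x4  BYTE   (r/w)   flags: enabled, insert event, remove event, eject
 *   0x5  BYTE   (write) command
 *   0x8  DWORD  (r/w)   command data
 */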
enum {
    CPHP_GET_NEXT_CPU_WITH_EVENT_CMD = 0,
    CPHP_OST_EVENT_CMD = 1,
    CPHP_OST_STATUS_CMD = 2,
    CPHP_GET_CPU_ID_CMD = 3,
    CPHP_CMD_MAX
};
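/*
 * Build a QAPI ACPIOSTInfo describing the _OST event/status the guest last
 * reported for the CPU slot at index @idx.
 */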
static ACPIOSTInfo *acpi_cpu_device_status(int idx, AcpiCpuStatus *cdev)
{
    ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1);

    info->slot_type = ACPI_SLOT_TYPE_CPU;
    info->slot = g_strdup_printf("%d", idx);
    info->source = cdev->ost_event;
    info->status = cdev->ost_status;
    if (cdev->cpu) {
        DeviceState *dev = DEVICE(cdev->cpu);
        if (dev->id) {
            info->device = g_strdup(dev->id);
            info->has_device = true;
        }
    }
    return info;
}
void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list)
{
    int i;

    for (i = 0; i < cpu_st->dev_count; i++) {
        ACPIOSTInfoList *elem = g_new0(ACPIOSTInfoList, 1);

        elem->value = acpi_cpu_device_status(i, &cpu_st->devs[i]);
        elem->next = NULL;
        **list = elem;
        *list = &elem->next;
    }
}
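/*
 * Guest reads of the register block: the flags byte reflects the state of
 * the currently selected CPU slot, the data registers return the result of
 * the most recently issued command.
 */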
static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t val = 0;
    CPUHotplugState *cpu_st = opaque;
    AcpiCpuStatus *cdev;

    if (cpu_st->selector >= cpu_st->dev_count) {
        return val;
    }

    cdev = &cpu_st->devs[cpu_st->selector];
    switch (addr) {
    case ACPI_CPU_FLAGS_OFFSET_RW: /* pack and return is_* fields */
        val |= cdev->cpu ? 1 : 0;
        val |= cdev->is_inserting ? 2 : 0;
        val |= cdev->is_removing ? 4 : 0;
        trace_cpuhp_acpi_read_flags(cpu_st->selector, val);
        break;
    case ACPI_CPU_CMD_DATA_OFFSET_RW:
        switch (cpu_st->command) {
        case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD:
            val = cpu_st->selector;
            break;
        case CPHP_GET_CPU_ID_CMD:
            val = cdev->arch_id & 0xFFFFFFFF;
            break;
        }
        trace_cpuhp_acpi_read_cmd_data(cpu_st->selector, val);
        break;
    case ACPI_CPU_CMD_DATA2_OFFSET_R:
        switch (cpu_st->command) {
        case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD:
            val = 0;
            break;
        case CPHP_GET_CPU_ID_CMD:
            val = cdev->arch_id >> 32;
            break;
        }
        trace_cpuhp_acpi_read_cmd_data2(cpu_st->selector, val);
        break;
    }
    return val;
}
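/*
 * Guest writes to the register block: select a CPU slot, clear its
 * insert/remove events or request an eject via the flags byte, or issue a
 * command whose argument/result flows through the data register.
 */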
static void cpu_hotplug_wr(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    CPUHotplugState *cpu_st = opaque;
    AcpiCpuStatus *cdev;
    ACPIOSTInfo *info;

    assert(cpu_st->dev_count);

    if (addr) { /* the selector register itself must stay writable */
        if (cpu_st->selector >= cpu_st->dev_count) {
            trace_cpuhp_acpi_invalid_idx_selected(cpu_st->selector);
            return;
        }
    }

    switch (addr) {
    case ACPI_CPU_SELECTOR_OFFSET_WR: /* current CPU selector */
        cpu_st->selector = data;
        trace_cpuhp_acpi_write_idx(cpu_st->selector);
        break;
    case ACPI_CPU_FLAGS_OFFSET_RW: /* set is_* fields */
        cdev = &cpu_st->devs[cpu_st->selector];
        if (data & 2) { /* clear insert event */
            cdev->is_inserting = false;
            trace_cpuhp_acpi_clear_inserting_evt(cpu_st->selector);
        } else if (data & 4) { /* clear remove event */
            cdev->is_removing = false;
            trace_cpuhp_acpi_clear_remove_evt(cpu_st->selector);
        } else if (data & 8) {
            DeviceState *dev = NULL;
            HotplugHandler *hotplug_ctrl = NULL;

            if (!cdev->cpu || cdev->cpu == first_cpu) {
                trace_cpuhp_acpi_ejecting_invalid_cpu(cpu_st->selector);
                break;
            }

            trace_cpuhp_acpi_ejecting_cpu(cpu_st->selector);
            dev = DEVICE(cdev->cpu);
            hotplug_ctrl = qdev_get_hotplug_handler(dev);
            hotplug_handler_unplug(hotplug_ctrl, dev, NULL);
            object_unparent(OBJECT(dev));
        }
        break;
    case ACPI_CPU_CMD_OFFSET_WR:
        trace_cpuhp_acpi_write_cmd(cpu_st->selector, data);
        if (data < CPHP_CMD_MAX) {
            cpu_st->command = data;
            if (cpu_st->command == CPHP_GET_NEXT_CPU_WITH_EVENT_CMD) {
                uint32_t iter = cpu_st->selector;

                do {
                    cdev = &cpu_st->devs[iter];
                    if (cdev->is_inserting || cdev->is_removing) {
                        cpu_st->selector = iter;
                        trace_cpuhp_acpi_cpu_has_events(cpu_st->selector,
                            cdev->is_inserting, cdev->is_removing);
                        break;
                    }
                    iter = iter + 1 < cpu_st->dev_count ? iter + 1 : 0;
                } while (iter != cpu_st->selector);
            }
        }
        break;
    case ACPI_CPU_CMD_DATA_OFFSET_RW:
        switch (cpu_st->command) {
        case CPHP_OST_EVENT_CMD: {
            cdev = &cpu_st->devs[cpu_st->selector];
            cdev->ost_event = data;
            trace_cpuhp_acpi_write_ost_ev(cpu_st->selector, cdev->ost_event);
            break;
        }
        case CPHP_OST_STATUS_CMD: {
            cdev = &cpu_st->devs[cpu_st->selector];
            cdev->ost_status = data;
            info = acpi_cpu_device_status(cpu_st->selector, cdev);
            qapi_event_send_acpi_device_ost(info);
            qapi_free_ACPIOSTInfo(info);
            trace_cpuhp_acpi_write_ost_status(cpu_st->selector,
                                              cdev->ost_status);
            break;
        }
        }
        break;
    }
}
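/* The AML below accesses this region with both byte and dword operations. */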
static const MemoryRegionOps cpu_hotplug_ops = {
    .read = cpu_hotplug_rd,
    .write = cpu_hotplug_wr,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};
void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
                         CPUHotplugState *state, hwaddr base_addr)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *id_list;
    int i;

    assert(mc->possible_cpu_arch_ids);
    id_list = mc->possible_cpu_arch_ids(machine);
    state->dev_count = id_list->len;
    state->devs = g_new0(typeof(*state->devs), state->dev_count);
    for (i = 0; i < id_list->len; i++) {
        state->devs[i].cpu = CPU(id_list->cpus[i].cpu);
        state->devs[i].arch_id = id_list->cpus[i].arch_id;
    }
    memory_region_init_io(&state->ctrl_reg, owner, &cpu_hotplug_ops, state,
                          "acpi-cpu-hotplug", ACPI_CPU_HOTPLUG_REG_LEN);
    memory_region_add_subregion(as, base_addr, &state->ctrl_reg);
}
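/* Find the hotplug slot tracking a CPU device by matching its arch id. */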
static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev)
{
    CPUClass *k = CPU_GET_CLASS(dev);
    uint64_t cpu_arch_id = k->get_arch_id(CPU(dev));
    int i;

    for (i = 0; i < cpu_st->dev_count; i++) {
        if (cpu_arch_id == cpu_st->devs[i].arch_id) {
            return &cpu_st->devs[i];
        }
    }
    return NULL;
}
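/*
 * Plug/unplug callbacks: record the event in the matching slot and raise the
 * ACPI CPU hotplug event so the guest runs the scan method (CSCN).
 */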
void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
                      CPUHotplugState *cpu_st, DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->cpu = CPU(dev);
    if (dev->hotplugged) {
        cdev->is_inserting = true;
        acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
    }
}
void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
                                CPUHotplugState *cpu_st,
                                DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->is_removing = true;
    acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
}
void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st,
                        DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->cpu = NULL;
}
static const VMStateDescription vmstate_cpuhp_sts = {
    .name = "CPU hotplug device state",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(is_inserting, AcpiCpuStatus),
        VMSTATE_BOOL(is_removing, AcpiCpuStatus),
        VMSTATE_UINT32(ost_event, AcpiCpuStatus),
        VMSTATE_UINT32(ost_status, AcpiCpuStatus),
        VMSTATE_END_OF_LIST()
    }
};
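/* Top-level migration state; devs[] is streamed as a dev_count-sized array. */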
const VMStateDescription vmstate_cpu_hotplug = {
    .name = "CPU hotplug state",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, CPUHotplugState),
        VMSTATE_UINT8(command, CPUHotplugState),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, CPUHotplugState, dev_count,
                                             vmstate_cpuhp_sts, AcpiCpuStatus),
        VMSTATE_END_OF_LIST()
    }
};
#define CPU_NAME_FMT      "C%.03X"
#define CPUHP_RES_DEVICE  "PRES"
#define CPU_LOCK          "CPLK"
#define CPU_STS_METHOD    "CSTA"
#define CPU_SCAN_METHOD   "CSCN"
#define CPU_NOTIFY_METHOD "CTFY"
#define CPU_EJECT_METHOD  "CEJ0"
#define CPU_OST_METHOD    "COST"
#define CPU_ADDED_LIST    "CNEW"

#define CPU_ENABLED       "CPEN"
#define CPU_SELECTOR      "CSEL"
#define CPU_COMMAND       "CCMD"
#define CPU_DATA          "CDAT"
#define CPU_INSERT_EVENT  "CINS"
#define CPU_REMOVE_EVENT  "CRMV"
#define CPU_EJECT_EVENT   "CEJ0"
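/*
 * Generate the CPU hotplug AML: a "PRES" device exposing the register block
 * described above, and a \_SB.CPUS container holding the CTFY, CSTA, CEJ0,
 * CSCN and COST helper methods plus one object per possible CPU.
 */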
void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
                    hwaddr io_base,
                    const char *res_root,
                    const char *event_handler_method)
{
    Aml *ifctx;
    Aml *field;
    Aml *method;
    Aml *cpu_ctrl_dev;
    Aml *cpus_dev;
    Aml *zero = aml_int(0);
    Aml *one = aml_int(1);
    Aml *sb_scope = aml_scope("_SB");
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine);
    char *cphp_res_path = g_strdup_printf("%s." CPUHP_RES_DEVICE, res_root);
    Object *obj = object_resolve_path_type("", TYPE_ACPI_DEVICE_IF, NULL);
    AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj);
    AcpiDeviceIf *adev = ACPI_DEVICE_IF(obj);
    cpu_ctrl_dev = aml_device("%s", cphp_res_path);
    {
        Aml *crs;

        aml_append(cpu_ctrl_dev,
            aml_name_decl("_HID", aml_eisaid("PNP0A06")));
        aml_append(cpu_ctrl_dev,
            aml_name_decl("_UID", aml_string("CPU Hotplug resources")));
        aml_append(cpu_ctrl_dev, aml_mutex(CPU_LOCK, 0));

        crs = aml_resource_template();
        aml_append(crs, aml_io(AML_DECODE16, io_base, io_base, 1,
                               ACPI_CPU_HOTPLUG_REG_LEN));
        aml_append(cpu_ctrl_dev, aml_name_decl("_CRS", crs));

        /* declare CPU hotplug I/O region with related access fields */
        aml_append(cpu_ctrl_dev,
            aml_operation_region("PRST", AML_SYSTEM_IO, aml_int(io_base),
                                 ACPI_CPU_HOTPLUG_REG_LEN));

        field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK,
                          AML_WRITE_AS_ZEROS);
        aml_append(field, aml_reserved_field(ACPI_CPU_FLAGS_OFFSET_RW * 8));
        /* 1 if enabled, read only */
        aml_append(field, aml_named_field(CPU_ENABLED, 1));
        /* (read) 1 if has an insert event. (write) 1 to clear event */
        aml_append(field, aml_named_field(CPU_INSERT_EVENT, 1));
        /* (read) 1 if has a remove event. (write) 1 to clear event */
        aml_append(field, aml_named_field(CPU_REMOVE_EVENT, 1));
        /* initiates device eject, write only */
        aml_append(field, aml_named_field(CPU_EJECT_EVENT, 1));
        aml_append(field, aml_reserved_field(4));
        aml_append(field, aml_named_field(CPU_COMMAND, 8));
        aml_append(cpu_ctrl_dev, field);

        field = aml_field("PRST", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
        /* CPU selector, write only */
        aml_append(field, aml_named_field(CPU_SELECTOR, 32));
        /* flags + cmd + 2byte align */
        aml_append(field, aml_reserved_field(4 * 8));
        aml_append(field, aml_named_field(CPU_DATA, 32));
        aml_append(cpu_ctrl_dev, field);
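        /*
         * These named fields overlay the window handled by cpu_hotplug_rd()
         * and cpu_hotplug_wr(): CSEL/CDAT use dword access, the event flags
         * and CCMD use byte access.
         */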
        if (opts.has_legacy_cphp) {
            method = aml_method("_INI", 0, AML_SERIALIZED);
            /* switch off legacy CPU hotplug HW and use the new one,
             * on reboot the system is in new mode and writing 0
             * in CPU_SELECTOR selects the BSP, which is a NOP at
             * the time _INI is called */
            aml_append(method, aml_store(zero, aml_name(CPU_SELECTOR)));
            aml_append(cpu_ctrl_dev, method);
        }
    }
    aml_append(sb_scope, cpu_ctrl_dev);
    cpus_dev = aml_device("\\_SB.CPUS");
    {
        int i;
        Aml *ctrl_lock = aml_name("%s.%s", cphp_res_path, CPU_LOCK);
        Aml *cpu_selector = aml_name("%s.%s", cphp_res_path, CPU_SELECTOR);
        Aml *is_enabled = aml_name("%s.%s", cphp_res_path, CPU_ENABLED);
        Aml *cpu_cmd = aml_name("%s.%s", cphp_res_path, CPU_COMMAND);
        Aml *cpu_data = aml_name("%s.%s", cphp_res_path, CPU_DATA);
        Aml *ins_evt = aml_name("%s.%s", cphp_res_path, CPU_INSERT_EVENT);
        Aml *rm_evt = aml_name("%s.%s", cphp_res_path, CPU_REMOVE_EVENT);
        Aml *ej_evt = aml_name("%s.%s", cphp_res_path, CPU_EJECT_EVENT);

        aml_append(cpus_dev, aml_name_decl("_HID", aml_string("ACPI0010")));
        aml_append(cpus_dev, aml_name_decl("_CID", aml_eisaid("PNP0A05")));
        method = aml_method(CPU_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
        for (i = 0; i < arch_ids->len; i++) {
            Aml *cpu = aml_name(CPU_NAME_FMT, i);
            Aml *uid = aml_arg(0);
            Aml *event = aml_arg(1);

            ifctx = aml_if(aml_equal(uid, aml_int(i)));
            {
                aml_append(ifctx, aml_notify(cpu, event));
            }
            aml_append(method, ifctx);
        }
        aml_append(cpus_dev, method);
        method = aml_method(CPU_STS_METHOD, 1, AML_SERIALIZED);
        {
            Aml *idx = aml_arg(0);
            Aml *sta = aml_local(0);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(idx, cpu_selector));
            aml_append(method, aml_store(zero, sta));
            ifctx = aml_if(aml_equal(is_enabled, one));
            {
                aml_append(ifctx, aml_store(aml_int(0xF), sta));
            }
            aml_append(method, ifctx);
            aml_append(method, aml_release(ctrl_lock));
            aml_append(method, aml_return(sta));
        }
        aml_append(cpus_dev, method);
        method = aml_method(CPU_EJECT_METHOD, 1, AML_SERIALIZED);
        {
            Aml *idx = aml_arg(0);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(idx, cpu_selector));
            aml_append(method, aml_store(one, ej_evt));
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);
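        /*
         * CSCN: scan all possible CPUs for pending insert/remove events,
         * collect newly inserted CPUs into CPU_ADDED_LIST in batches, give
         * firmware a chance to pull them in first (if the SMI upcall was
         * negotiated), then Notify OSPM and clear the events.
         */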
        method = aml_method(CPU_SCAN_METHOD, 0, AML_SERIALIZED);
        {
            const uint8_t max_cpus_per_pass = 255;
            Aml *else_ctx;
            Aml *while_ctx, *while_ctx2;
            Aml *has_event = aml_local(0);
            Aml *dev_chk = aml_int(1);
            Aml *eject_req = aml_int(3);
            Aml *next_cpu_cmd = aml_int(CPHP_GET_NEXT_CPU_WITH_EVENT_CMD);
            Aml *num_added_cpus = aml_local(1);
            Aml *cpu_idx = aml_local(2);
            Aml *uid = aml_local(3);
            Aml *has_job = aml_local(4);
            Aml *new_cpus = aml_name(CPU_ADDED_LIST);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));

            /*
             * Windows versions newer than XP (including Windows 10/Windows
             * Server 2019) do support VarPackageOp, but it is crippled to
             * hold the same number of elements as the old PackageOp.
             * For compatibility with Windows XP (so it won't crash) use
             * ACPI 1.0 PackageOp, which can hold at most 255 elements.
             *
             * Use a named package, as old Windows versions don't support
             * packages in a local variable.
             */
            aml_append(method, aml_name_decl(CPU_ADDED_LIST,
                                             aml_package(max_cpus_per_pass)));

            aml_append(method, aml_store(zero, uid));
            aml_append(method, aml_store(one, has_job));
            /*
             * CPU_ADDED_LIST can hold a limited number of elements; the
             * outer loop processes CPUs in batches, which lets us handle
             * more CPUs than CPU_ADDED_LIST can hold.
             */
            while_ctx2 = aml_while(aml_equal(has_job, one));
            {
                aml_append(while_ctx2, aml_store(zero, has_job));

                aml_append(while_ctx2, aml_store(one, has_event));
                aml_append(while_ctx2, aml_store(zero, num_added_cpus));

                /*
                 * Scan CPUs, until there are no more CPUs with events or
                 * CPU_ADDED_LIST capacity is exhausted
                 */
                while_ctx = aml_while(aml_land(aml_equal(has_event, one),
                    aml_lless(uid, aml_int(arch_ids->len))));
                {
                    /*
                     * clear loop exit condition, ins_evt/rm_evt checks will
                     * set it to 1 when next_cpu_cmd returns a CPU with events
                     */
                    aml_append(while_ctx, aml_store(zero, has_event));

                    aml_append(while_ctx, aml_store(uid, cpu_selector));
                    aml_append(while_ctx, aml_store(next_cpu_cmd, cpu_cmd));

                    /*
                     * wrap around case, scan is complete, exit loop.
                     * It happens since events are not cleared in scan loop,
                     * so next_cpu_cmd continues to find already processed CPUs
                     */
                    ifctx = aml_if(aml_lless(cpu_data, uid));
                    {
                        aml_append(ifctx, aml_break());
                    }
                    aml_append(while_ctx, ifctx);

                    /*
                     * if CPU_ADDED_LIST is full, exit inner loop and process
                     * collected CPUs
                     */
                    ifctx = aml_if(
                        aml_equal(num_added_cpus, aml_int(max_cpus_per_pass)));
                    {
                        aml_append(ifctx, aml_store(one, has_job));
                        aml_append(ifctx, aml_break());
                    }
                    aml_append(while_ctx, ifctx);

                    aml_append(while_ctx, aml_store(cpu_data, uid));
                    ifctx = aml_if(aml_equal(ins_evt, one));
                    {
                        /* cache added CPUs to Notify/Wakeup later */
                        aml_append(ifctx, aml_store(uid,
                            aml_index(new_cpus, num_added_cpus)));
                        aml_append(ifctx, aml_increment(num_added_cpus));
                        aml_append(ifctx, aml_store(one, has_event));
                    }
                    aml_append(while_ctx, ifctx);
                    else_ctx = aml_else();
                    ifctx = aml_if(aml_equal(rm_evt, one));
                    {
                        aml_append(ifctx,
                            aml_call2(CPU_NOTIFY_METHOD, uid, eject_req));
                        aml_append(ifctx, aml_store(one, rm_evt));
                        aml_append(ifctx, aml_store(one, has_event));
                    }
                    aml_append(else_ctx, ifctx);
                    aml_append(while_ctx, else_ctx);
                    aml_append(while_ctx, aml_increment(uid));
                }
                aml_append(while_ctx2, while_ctx);
                /*
                 * in case FW negotiated ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT,
                 * make upcall to FW, so it can pull in new CPUs before
                 * OS is notified and wakes them up
                 */
                if (opts.smi_path) {
                    ifctx = aml_if(aml_lgreater(num_added_cpus, zero));
                    {
                        aml_append(ifctx,
                            aml_store(aml_int(OVMF_CPUHP_SMI_CMD),
                                      aml_name("%s", opts.smi_path)));
                    }
                    aml_append(while_ctx2, ifctx);
                }

                /* Notify OSPM about new CPUs and clear insert events */
                aml_append(while_ctx2, aml_store(zero, cpu_idx));
                while_ctx = aml_while(aml_lless(cpu_idx, num_added_cpus));
                {
                    aml_append(while_ctx,
                        aml_store(aml_derefof(aml_index(new_cpus, cpu_idx)),
                                  uid));
                    aml_append(while_ctx,
                        aml_call2(CPU_NOTIFY_METHOD, uid, dev_chk));
                    aml_append(while_ctx, aml_store(uid, aml_debug()));
                    aml_append(while_ctx, aml_store(uid, cpu_selector));
                    aml_append(while_ctx, aml_store(one, ins_evt));
                    aml_append(while_ctx, aml_increment(cpu_idx));
                }
                aml_append(while_ctx2, while_ctx);

                /*
                 * If another batch is needed, then it will resume scanning
                 * exactly at -- and not after -- the last CPU that's currently
                 * in CPU_ADDED_LIST. In other words, the last CPU in
                 * CPU_ADDED_LIST is going to be re-checked. That's OK: we've
                 * just cleared the insert event for *all* CPUs in
                 * CPU_ADDED_LIST, including the last one. So the scan will
                 * simply seek past it.
                 */
            }
            aml_append(method, while_ctx2);
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);
        method = aml_method(CPU_OST_METHOD, 4, AML_SERIALIZED);
        {
            Aml *uid = aml_arg(0);
            Aml *ev_cmd = aml_int(CPHP_OST_EVENT_CMD);
            Aml *st_cmd = aml_int(CPHP_OST_STATUS_CMD);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(uid, cpu_selector));
            aml_append(method, aml_store(ev_cmd, cpu_cmd));
            aml_append(method, aml_store(aml_arg(1), cpu_data));
            aml_append(method, aml_store(st_cmd, cpu_cmd));
            aml_append(method, aml_store(aml_arg(2), cpu_data));
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);
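        /*
         * Per-CPU objects: a legacy Processor() when ACPI 1.0 compatibility
         * is requested and the arch id fits below 255, otherwise a Device()
         * with _HID "ACPI0007". Each one gets _STA, _MAT, _OST, an _EJ0 for
         * everything but the boot CPU and, when a NUMA node is assigned,
         * _PXM.
         */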
        /* build Processor object for each processor */
        for (i = 0; i < arch_ids->len; i++) {
            Aml *dev;
            Aml *uid = aml_int(i);
            GArray *madt_buf = g_array_new(0, 1, 1);
            int arch_id = arch_ids->cpus[i].arch_id;

            if (opts.acpi_1_compatible && arch_id < 255) {
                dev = aml_processor(i, 0, 0, CPU_NAME_FMT, i);
            } else {
                dev = aml_device(CPU_NAME_FMT, i);
                aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
                aml_append(dev, aml_name_decl("_UID", uid));
            }

            method = aml_method("_STA", 0, AML_SERIALIZED);
            aml_append(method, aml_return(aml_call1(CPU_STS_METHOD, uid)));
            aml_append(dev, method);

            /* build _MAT object */
            assert(adevc && adevc->madt_cpu);
            adevc->madt_cpu(adev, i, arch_ids, madt_buf);
            switch (madt_buf->data[0]) {
            case ACPI_APIC_PROCESSOR: {
                AcpiMadtProcessorApic *apic = (void *)madt_buf->data;
                apic->flags = cpu_to_le32(1);
                break;
            }
            case ACPI_APIC_LOCAL_X2APIC: {
                AcpiMadtProcessorX2Apic *apic = (void *)madt_buf->data;
                apic->flags = cpu_to_le32(1);
                break;
            }
            }
            aml_append(dev, aml_name_decl("_MAT",
                aml_buffer(madt_buf->len, (uint8_t *)madt_buf->data)));
            g_array_free(madt_buf, true);

            if (CPU(arch_ids->cpus[i].cpu) != first_cpu) {
                method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
                aml_append(method, aml_call1(CPU_EJECT_METHOD, uid));
                aml_append(dev, method);
            }

            method = aml_method("_OST", 3, AML_SERIALIZED);
            aml_append(method,
                aml_call4(CPU_OST_METHOD, uid, aml_arg(0),
                          aml_arg(1), aml_arg(2))
            );
            aml_append(dev, method);

            /* Linux guests discard SRAT info for non-present CPUs;
             * as a result _PXM is required for all CPUs which might
             * be hot-plugged. For simplicity, add it for all CPUs.
             */
            if (arch_ids->cpus[i].props.has_node_id) {
                aml_append(dev, aml_name_decl("_PXM",
                    aml_int(arch_ids->cpus[i].props.node_id)));
            }

            aml_append(cpus_dev, dev);
        }
    }
    aml_append(sb_scope, cpus_dev);
    aml_append(table, sb_scope);

    method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED);
    aml_append(method, aml_call0("\\_SB.CPUS." CPU_SCAN_METHOD));
    aml_append(table, method);

    g_free(cphp_res_path);
}