#include "qemu/osdep.h"
#include "hw/boards.h"
#include "hw/acpi/cpu.h"
#include "qapi/error.h"
#include "trace.h"
/*
 * Layout of the CPU hotplug register block exposed to the guest
 * (ACPI_CPU_HOTPLUG_REG_LEN bytes of I/O space, see cpu_hotplug_ops).
 */
#define ACPI_CPU_HOTPLUG_REG_LEN 12
#define ACPI_CPU_SELECTOR_OFFSET_WR 0   /* u32: selects CPU, write only */
#define ACPI_CPU_FLAGS_OFFSET_RW 4      /* u8: is_* event flags */
#define ACPI_CPU_CMD_OFFSET_WR 5        /* u8: command, write only */
#define ACPI_CPU_CMD_DATA_OFFSET_RW 8   /* u32: command data */

/* Commands the guest may write to ACPI_CPU_CMD_OFFSET_WR */
enum {
    CPHP_GET_NEXT_CPU_WITH_EVENT_CMD = 0,
    CPHP_CMD_MAX    /* sentinel: values >= this are rejected by the write handler */
};
18 static uint64_t cpu_hotplug_rd(void *opaque
, hwaddr addr
, unsigned size
)
21 CPUHotplugState
*cpu_st
= opaque
;
24 if (cpu_st
->selector
>= cpu_st
->dev_count
) {
28 cdev
= &cpu_st
->devs
[cpu_st
->selector
];
30 case ACPI_CPU_FLAGS_OFFSET_RW
: /* pack and return is_* fields */
31 val
|= cdev
->cpu
? 1 : 0;
32 val
|= cdev
->is_inserting
? 2 : 0;
33 trace_cpuhp_acpi_read_flags(cpu_st
->selector
, val
);
35 case ACPI_CPU_CMD_DATA_OFFSET_RW
:
36 switch (cpu_st
->command
) {
37 case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD
:
38 val
= cpu_st
->selector
;
43 trace_cpuhp_acpi_read_cmd_data(cpu_st
->selector
, val
);
51 static void cpu_hotplug_wr(void *opaque
, hwaddr addr
, uint64_t data
,
54 CPUHotplugState
*cpu_st
= opaque
;
57 assert(cpu_st
->dev_count
);
60 if (cpu_st
->selector
>= cpu_st
->dev_count
) {
61 trace_cpuhp_acpi_invalid_idx_selected(cpu_st
->selector
);
67 case ACPI_CPU_SELECTOR_OFFSET_WR
: /* current CPU selector */
68 cpu_st
->selector
= data
;
69 trace_cpuhp_acpi_write_idx(cpu_st
->selector
);
71 case ACPI_CPU_FLAGS_OFFSET_RW
: /* set is_* fields */
72 cdev
= &cpu_st
->devs
[cpu_st
->selector
];
73 if (data
& 2) { /* clear insert event */
74 cdev
->is_inserting
= false;
75 trace_cpuhp_acpi_clear_inserting_evt(cpu_st
->selector
);
78 case ACPI_CPU_CMD_OFFSET_WR
:
79 trace_cpuhp_acpi_write_cmd(cpu_st
->selector
, data
);
80 if (data
< CPHP_CMD_MAX
) {
81 cpu_st
->command
= data
;
82 if (cpu_st
->command
== CPHP_GET_NEXT_CPU_WITH_EVENT_CMD
) {
83 uint32_t iter
= cpu_st
->selector
;
86 cdev
= &cpu_st
->devs
[iter
];
87 if (cdev
->is_inserting
) {
88 cpu_st
->selector
= iter
;
89 trace_cpuhp_acpi_cpu_has_events(cpu_st
->selector
,
93 iter
= iter
+ 1 < cpu_st
->dev_count
? iter
+ 1 : 0;
94 } while (iter
!= cpu_st
->selector
);
103 static const MemoryRegionOps cpu_hotplug_ops
= {
104 .read
= cpu_hotplug_rd
,
105 .write
= cpu_hotplug_wr
,
106 .endianness
= DEVICE_LITTLE_ENDIAN
,
108 .min_access_size
= 1,
109 .max_access_size
= 4,
113 void cpu_hotplug_hw_init(MemoryRegion
*as
, Object
*owner
,
114 CPUHotplugState
*state
, hwaddr base_addr
)
116 MachineState
*machine
= MACHINE(qdev_get_machine());
117 MachineClass
*mc
= MACHINE_GET_CLASS(machine
);
118 CPUArchIdList
*id_list
;
121 assert(mc
->possible_cpu_arch_ids
);
122 id_list
= mc
->possible_cpu_arch_ids(machine
);
123 state
->dev_count
= id_list
->len
;
124 state
->devs
= g_new0(typeof(*state
->devs
), state
->dev_count
);
125 for (i
= 0; i
< id_list
->len
; i
++) {
126 state
->devs
[i
].cpu
= id_list
->cpus
[i
].cpu
;
127 state
->devs
[i
].arch_id
= id_list
->cpus
[i
].arch_id
;
130 memory_region_init_io(&state
->ctrl_reg
, owner
, &cpu_hotplug_ops
, state
,
131 "acpi-mem-hotplug", ACPI_CPU_HOTPLUG_REG_LEN
);
132 memory_region_add_subregion(as
, base_addr
, &state
->ctrl_reg
);
135 static AcpiCpuStatus
*get_cpu_status(CPUHotplugState
*cpu_st
, DeviceState
*dev
)
137 CPUClass
*k
= CPU_GET_CLASS(dev
);
138 uint64_t cpu_arch_id
= k
->get_arch_id(CPU(dev
));
141 for (i
= 0; i
< cpu_st
->dev_count
; i
++) {
142 if (cpu_arch_id
== cpu_st
->devs
[i
].arch_id
) {
143 return &cpu_st
->devs
[i
];
149 void acpi_cpu_plug_cb(HotplugHandler
*hotplug_dev
,
150 CPUHotplugState
*cpu_st
, DeviceState
*dev
, Error
**errp
)
154 cdev
= get_cpu_status(cpu_st
, dev
);
159 cdev
->cpu
= CPU(dev
);
160 if (dev
->hotplugged
) {
161 cdev
->is_inserting
= true;
162 acpi_send_event(DEVICE(hotplug_dev
), ACPI_CPU_HOTPLUG_STATUS
);
166 static const VMStateDescription vmstate_cpuhp_sts
= {
167 .name
= "CPU hotplug device state",
169 .minimum_version_id
= 1,
170 .minimum_version_id_old
= 1,
171 .fields
= (VMStateField
[]) {
172 VMSTATE_BOOL(is_inserting
, AcpiCpuStatus
),
173 VMSTATE_END_OF_LIST()
177 const VMStateDescription vmstate_cpu_hotplug
= {
178 .name
= "CPU hotplug state",
180 .minimum_version_id
= 1,
181 .minimum_version_id_old
= 1,
182 .fields
= (VMStateField
[]) {
183 VMSTATE_UINT32(selector
, CPUHotplugState
),
184 VMSTATE_UINT8(command
, CPUHotplugState
),
185 VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs
, CPUHotplugState
, dev_count
,
186 vmstate_cpuhp_sts
, AcpiCpuStatus
),
187 VMSTATE_END_OF_LIST()
/* AML object/method names used by build_cpus_aml() below */
#define CPU_NAME_FMT      "C%.03X"  /* per-CPU device name, index in hex */
#define CPUHP_RES_DEVICE  "PRES"    /* hotplug resource device */
#define CPU_LOCK          "CPLK"    /* mutex guarding the register block */
#define CPU_STS_METHOD    "CSTA"    /* _STA helper */
#define CPU_SCAN_METHOD   "CSCN"    /* event-scan method */
#define CPU_NOTIFY_METHOD "CTFY"    /* per-CPU Notify dispatcher */

/* AML field names mapping onto the hotplug register block */
#define CPU_ENABLED       "CPEN"
#define CPU_SELECTOR      "CSEL"
#define CPU_COMMAND       "CCMD"
#define CPU_DATA          "CDAT"
#define CPU_INSERT_EVENT  "CINS"
204 void build_cpus_aml(Aml
*table
, MachineState
*machine
, CPUHotplugFeatures opts
,
206 const char *res_root
,
207 const char *event_handler_method
)
214 Aml
*zero
= aml_int(0);
215 Aml
*one
= aml_int(1);
216 Aml
*sb_scope
= aml_scope("_SB");
217 MachineClass
*mc
= MACHINE_GET_CLASS(machine
);
218 CPUArchIdList
*arch_ids
= mc
->possible_cpu_arch_ids(machine
);
219 char *cphp_res_path
= g_strdup_printf("%s." CPUHP_RES_DEVICE
, res_root
);
220 Object
*obj
= object_resolve_path_type("", TYPE_ACPI_DEVICE_IF
, NULL
);
221 AcpiDeviceIfClass
*adevc
= ACPI_DEVICE_IF_GET_CLASS(obj
);
222 AcpiDeviceIf
*adev
= ACPI_DEVICE_IF(obj
);
224 cpu_ctrl_dev
= aml_device("%s", cphp_res_path
);
228 aml_append(cpu_ctrl_dev
,
229 aml_name_decl("_HID", aml_eisaid("PNP0A06")));
230 aml_append(cpu_ctrl_dev
,
231 aml_name_decl("_UID", aml_string("CPU Hotplug resources")));
232 aml_append(cpu_ctrl_dev
, aml_mutex(CPU_LOCK
, 0));
234 crs
= aml_resource_template();
235 aml_append(crs
, aml_io(AML_DECODE16
, io_base
, io_base
, 1,
236 ACPI_CPU_HOTPLUG_REG_LEN
));
237 aml_append(cpu_ctrl_dev
, aml_name_decl("_CRS", crs
));
239 /* declare CPU hotplug MMIO region with related access fields */
240 aml_append(cpu_ctrl_dev
,
241 aml_operation_region("PRST", AML_SYSTEM_IO
, aml_int(io_base
),
242 ACPI_CPU_HOTPLUG_REG_LEN
));
244 field
= aml_field("PRST", AML_BYTE_ACC
, AML_NOLOCK
,
246 aml_append(field
, aml_reserved_field(ACPI_CPU_FLAGS_OFFSET_RW
* 8));
247 /* 1 if enabled, read only */
248 aml_append(field
, aml_named_field(CPU_ENABLED
, 1));
249 /* (read) 1 if has a insert event. (write) 1 to clear event */
250 aml_append(field
, aml_named_field(CPU_INSERT_EVENT
, 1));
251 aml_append(field
, aml_reserved_field(6));
252 aml_append(field
, aml_named_field(CPU_COMMAND
, 8));
253 aml_append(cpu_ctrl_dev
, field
);
255 field
= aml_field("PRST", AML_DWORD_ACC
, AML_NOLOCK
, AML_PRESERVE
);
256 /* CPU selector, write only */
257 aml_append(field
, aml_named_field(CPU_SELECTOR
, 32));
258 /* flags + cmd + 2byte align */
259 aml_append(field
, aml_reserved_field(4 * 8));
260 aml_append(field
, aml_named_field(CPU_DATA
, 32));
261 aml_append(cpu_ctrl_dev
, field
);
264 aml_append(sb_scope
, cpu_ctrl_dev
);
266 cpus_dev
= aml_device("\\_SB.CPUS");
269 Aml
*ctrl_lock
= aml_name("%s.%s", cphp_res_path
, CPU_LOCK
);
270 Aml
*cpu_selector
= aml_name("%s.%s", cphp_res_path
, CPU_SELECTOR
);
271 Aml
*is_enabled
= aml_name("%s.%s", cphp_res_path
, CPU_ENABLED
);
272 Aml
*cpu_cmd
= aml_name("%s.%s", cphp_res_path
, CPU_COMMAND
);
273 Aml
*cpu_data
= aml_name("%s.%s", cphp_res_path
, CPU_DATA
);
274 Aml
*ins_evt
= aml_name("%s.%s", cphp_res_path
, CPU_INSERT_EVENT
);
276 aml_append(cpus_dev
, aml_name_decl("_HID", aml_string("ACPI0010")));
277 aml_append(cpus_dev
, aml_name_decl("_CID", aml_eisaid("PNP0A05")));
279 method
= aml_method(CPU_NOTIFY_METHOD
, 2, AML_NOTSERIALIZED
);
280 for (i
= 0; i
< arch_ids
->len
; i
++) {
281 Aml
*cpu
= aml_name(CPU_NAME_FMT
, i
);
282 Aml
*uid
= aml_arg(0);
283 Aml
*event
= aml_arg(1);
285 ifctx
= aml_if(aml_equal(uid
, aml_int(i
)));
287 aml_append(ifctx
, aml_notify(cpu
, event
));
289 aml_append(method
, ifctx
);
291 aml_append(cpus_dev
, method
);
293 method
= aml_method(CPU_STS_METHOD
, 1, AML_SERIALIZED
);
295 Aml
*idx
= aml_arg(0);
296 Aml
*sta
= aml_local(0);
298 aml_append(method
, aml_acquire(ctrl_lock
, 0xFFFF));
299 aml_append(method
, aml_store(idx
, cpu_selector
));
300 aml_append(method
, aml_store(zero
, sta
));
301 ifctx
= aml_if(aml_equal(is_enabled
, one
));
303 aml_append(ifctx
, aml_store(aml_int(0xF), sta
));
305 aml_append(method
, ifctx
);
306 aml_append(method
, aml_release(ctrl_lock
));
307 aml_append(method
, aml_return(sta
));
309 aml_append(cpus_dev
, method
);
311 method
= aml_method(CPU_SCAN_METHOD
, 0, AML_SERIALIZED
);
314 Aml
*has_event
= aml_local(0);
315 Aml
*dev_chk
= aml_int(1);
316 Aml
*next_cpu_cmd
= aml_int(CPHP_GET_NEXT_CPU_WITH_EVENT_CMD
);
318 aml_append(method
, aml_acquire(ctrl_lock
, 0xFFFF));
319 aml_append(method
, aml_store(one
, has_event
));
320 while_ctx
= aml_while(aml_equal(has_event
, one
));
322 /* clear loop exit condition, ins_evt check
323 * will set it to 1 while next_cpu_cmd returns a CPU
325 aml_append(while_ctx
, aml_store(zero
, has_event
));
326 aml_append(while_ctx
, aml_store(next_cpu_cmd
, cpu_cmd
));
327 ifctx
= aml_if(aml_equal(ins_evt
, one
));
330 aml_call2(CPU_NOTIFY_METHOD
, cpu_data
, dev_chk
));
331 aml_append(ifctx
, aml_store(one
, ins_evt
));
332 aml_append(ifctx
, aml_store(one
, has_event
));
334 aml_append(while_ctx
, ifctx
);
336 aml_append(method
, while_ctx
);
337 aml_append(method
, aml_release(ctrl_lock
));
339 aml_append(cpus_dev
, method
);
341 /* build Processor object for each processor */
342 for (i
= 0; i
< arch_ids
->len
; i
++) {
344 Aml
*uid
= aml_int(i
);
345 GArray
*madt_buf
= g_array_new(0, 1, 1);
346 int arch_id
= arch_ids
->cpus
[i
].arch_id
;
348 if (opts
.apci_1_compatible
&& arch_id
< 255) {
349 dev
= aml_processor(i
, 0, 0, CPU_NAME_FMT
, i
);
351 dev
= aml_device(CPU_NAME_FMT
, i
);
352 aml_append(dev
, aml_name_decl("_HID", aml_string("ACPI0007")));
353 aml_append(dev
, aml_name_decl("_UID", uid
));
356 method
= aml_method("_STA", 0, AML_SERIALIZED
);
357 aml_append(method
, aml_return(aml_call1(CPU_STS_METHOD
, uid
)));
358 aml_append(dev
, method
);
360 /* build _MAT object */
361 assert(adevc
&& adevc
->madt_cpu
);
362 adevc
->madt_cpu(adev
, i
, arch_ids
, madt_buf
);
363 switch (madt_buf
->data
[0]) {
364 case ACPI_APIC_PROCESSOR
: {
365 AcpiMadtProcessorApic
*apic
= (void *)madt_buf
->data
;
366 apic
->flags
= cpu_to_le32(1);
372 aml_append(dev
, aml_name_decl("_MAT",
373 aml_buffer(madt_buf
->len
, (uint8_t *)madt_buf
->data
)));
374 g_array_free(madt_buf
, true);
376 aml_append(cpus_dev
, dev
);
379 aml_append(sb_scope
, cpus_dev
);
380 aml_append(table
, sb_scope
);
382 method
= aml_method(event_handler_method
, 0, AML_NOTSERIALIZED
);
383 aml_append(method
, aml_call0("\\_SB.CPUS." CPU_SCAN_METHOD
));
384 aml_append(table
, method
);
386 g_free(cphp_res_path
);