/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * RTAS events handling
 *
 * Copyright (c) 2012 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "sysemu/sysemu.h"
#include "hw/qdev.h"
#include "sysemu/device_tree.h"

#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/pci/pci.h"
#include "hw/pci-host/spapr.h"
#include "hw/ppc/spapr_drc.h"
#include "qemu/help_option.h"
#include "qemu/bcd.h"
#include "hw/ppc/spapr_ovec.h"
#include <libfdt.h>

#define RTAS_LOG_VERSION_MASK 0xff000000
#define RTAS_LOG_VERSION_6 0x06000000
#define RTAS_LOG_SEVERITY_MASK 0x00e00000
#define RTAS_LOG_SEVERITY_ALREADY_REPORTED 0x00c00000
#define RTAS_LOG_SEVERITY_FATAL 0x00a00000
#define RTAS_LOG_SEVERITY_ERROR 0x00800000
#define RTAS_LOG_SEVERITY_ERROR_SYNC 0x00600000
#define RTAS_LOG_SEVERITY_WARNING 0x00400000
#define RTAS_LOG_SEVERITY_EVENT 0x00200000
#define RTAS_LOG_SEVERITY_NO_ERROR 0x00000000
#define RTAS_LOG_DISPOSITION_MASK 0x00180000
#define RTAS_LOG_DISPOSITION_FULLY_RECOVERED 0x00000000
#define RTAS_LOG_DISPOSITION_LIMITED_RECOVERY 0x00080000
#define RTAS_LOG_DISPOSITION_NOT_RECOVERED 0x00100000
#define RTAS_LOG_OPTIONAL_PART_PRESENT 0x00040000
#define RTAS_LOG_INITIATOR_MASK 0x0000f000
#define RTAS_LOG_INITIATOR_UNKNOWN 0x00000000
#define RTAS_LOG_INITIATOR_CPU 0x00001000
#define RTAS_LOG_INITIATOR_PCI 0x00002000
#define RTAS_LOG_INITIATOR_MEMORY 0x00004000
#define RTAS_LOG_INITIATOR_HOTPLUG 0x00006000
#define RTAS_LOG_TARGET_MASK 0x00000f00
#define RTAS_LOG_TARGET_UNKNOWN 0x00000000
#define RTAS_LOG_TARGET_CPU 0x00000100
#define RTAS_LOG_TARGET_PCI 0x00000200
#define RTAS_LOG_TARGET_MEMORY 0x00000400
#define RTAS_LOG_TARGET_HOTPLUG 0x00000600
#define RTAS_LOG_TYPE_MASK 0x000000ff
#define RTAS_LOG_TYPE_OTHER 0x00000000
#define RTAS_LOG_TYPE_RETRY 0x00000001
#define RTAS_LOG_TYPE_TCE_ERR 0x00000002
#define RTAS_LOG_TYPE_INTERN_DEV_FAIL 0x00000003
#define RTAS_LOG_TYPE_TIMEOUT 0x00000004
#define RTAS_LOG_TYPE_DATA_PARITY 0x00000005
#define RTAS_LOG_TYPE_ADDR_PARITY 0x00000006
#define RTAS_LOG_TYPE_CACHE_PARITY 0x00000007
#define RTAS_LOG_TYPE_ADDR_INVALID 0x00000008
#define RTAS_LOG_TYPE_ECC_UNCORR 0x00000009
#define RTAS_LOG_TYPE_ECC_CORR 0x0000000a
#define RTAS_LOG_TYPE_EPOW 0x00000040
#define RTAS_LOG_TYPE_HOTPLUG 0x000000e5

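/*
 * Layout of the RTAS version 6 extended error log delivered to the guest
 * via the check-exception RTAS call: a fixed header followed by Main-A,
 * Main-B and an event-specific (EPOW or hotplug) section.
 */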
struct rtas_error_log {
    uint32_t summary;
    uint32_t extended_length;
} QEMU_PACKED;

struct rtas_event_log_v6 {
    uint8_t b0;
#define RTAS_LOG_V6_B0_VALID 0x80
#define RTAS_LOG_V6_B0_UNRECOVERABLE_ERROR 0x40
#define RTAS_LOG_V6_B0_RECOVERABLE_ERROR 0x20
#define RTAS_LOG_V6_B0_DEGRADED_OPERATION 0x10
#define RTAS_LOG_V6_B0_PREDICTIVE_ERROR 0x08
#define RTAS_LOG_V6_B0_NEW_LOG 0x04
#define RTAS_LOG_V6_B0_BIGENDIAN 0x02
    uint8_t _resv1;
    uint8_t b2;
#define RTAS_LOG_V6_B2_POWERPC_FORMAT 0x80
#define RTAS_LOG_V6_B2_LOG_FORMAT_MASK 0x0f
#define RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT 0x0e
    uint8_t _resv2[9];
    uint32_t company;
#define RTAS_LOG_V6_COMPANY_IBM 0x49424d00 /* IBM<null> */
} QEMU_PACKED;

struct rtas_event_log_v6_section_header {
    uint16_t section_id;
    uint16_t section_length;
    uint8_t section_version;
    uint8_t section_subtype;
    uint16_t creator_component_id;
} QEMU_PACKED;

struct rtas_event_log_v6_maina {
#define RTAS_LOG_V6_SECTION_ID_MAINA 0x5048 /* PH */
    struct rtas_event_log_v6_section_header hdr;
    uint32_t creation_date; /* BCD: YYYYMMDD */
    uint32_t creation_time; /* BCD: HHMMSS00 */
    uint8_t _platform1[8];
    char creator_id;
    uint8_t _resv1[2];
    uint8_t section_count;
    uint8_t _resv2[4];
    uint8_t _platform2[8];
    uint32_t plid;
    uint8_t _platform3[4];
} QEMU_PACKED;

struct rtas_event_log_v6_mainb {
#define RTAS_LOG_V6_SECTION_ID_MAINB 0x5548 /* UH */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t subsystem_id;
    uint8_t _platform1;
    uint8_t event_severity;
    uint8_t event_subtype;
    uint8_t _platform2[4];
    uint8_t _resv1[2];
    uint16_t action_flags;
    uint8_t _resv2[4];
} QEMU_PACKED;

struct rtas_event_log_v6_epow {
#define RTAS_LOG_V6_SECTION_ID_EPOW 0x4550 /* EP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t sensor_value;
#define RTAS_LOG_V6_EPOW_ACTION_RESET 0
#define RTAS_LOG_V6_EPOW_ACTION_WARN_COOLING 1
#define RTAS_LOG_V6_EPOW_ACTION_WARN_POWER 2
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN 3
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_HALT 4
#define RTAS_LOG_V6_EPOW_ACTION_MAIN_ENCLOSURE 5
#define RTAS_LOG_V6_EPOW_ACTION_POWER_OFF 7
    uint8_t event_modifier;
#define RTAS_LOG_V6_EPOW_MODIFIER_NORMAL 1
#define RTAS_LOG_V6_EPOW_MODIFIER_ON_UPS 2
#define RTAS_LOG_V6_EPOW_MODIFIER_CRITICAL 3
#define RTAS_LOG_V6_EPOW_MODIFIER_TEMPERATURE 4
    uint8_t extended_modifier;
#define RTAS_LOG_V6_EPOW_XMODIFIER_SYSTEM_WIDE 0
#define RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC 1
    uint8_t _resv;
    uint64_t reason_code;
} QEMU_PACKED;

struct epow_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_epow epow;
} QEMU_PACKED;

union drc_identifier {
    uint32_t index;
    uint32_t count;
    struct {
        uint32_t count;
        uint32_t index;
    } count_indexed;
    char name[1];
} QEMU_PACKED;

struct rtas_event_log_v6_hp {
#define RTAS_LOG_V6_SECTION_ID_HOTPLUG 0x4850 /* HP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t hotplug_type;
#define RTAS_LOG_V6_HP_TYPE_CPU 1
#define RTAS_LOG_V6_HP_TYPE_MEMORY 2
#define RTAS_LOG_V6_HP_TYPE_SLOT 3
#define RTAS_LOG_V6_HP_TYPE_PHB 4
#define RTAS_LOG_V6_HP_TYPE_PCI 5
    uint8_t hotplug_action;
#define RTAS_LOG_V6_HP_ACTION_ADD 1
#define RTAS_LOG_V6_HP_ACTION_REMOVE 2
    uint8_t hotplug_identifier;
#define RTAS_LOG_V6_HP_ID_DRC_NAME 1
#define RTAS_LOG_V6_HP_ID_DRC_INDEX 2
#define RTAS_LOG_V6_HP_ID_DRC_COUNT 3
#define RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED 4
    uint8_t reserved;
    union drc_identifier drc_id;
} QEMU_PACKED;

struct hp_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_hp hp;
} QEMU_PACKED;

typedef enum EventClass {
    EVENT_CLASS_INTERNAL_ERRORS = 0,
    EVENT_CLASS_EPOW = 1,
    EVENT_CLASS_RESERVED = 2,
    EVENT_CLASS_HOT_PLUG = 3,
    EVENT_CLASS_IO = 4,
    EVENT_CLASS_MAX
} EventClassIndex;
#define EVENT_CLASS_MASK(index) (1 << (31 - index))

static const char * const event_names[EVENT_CLASS_MAX] = {
    [EVENT_CLASS_INTERNAL_ERRORS] = "internal-errors",
    [EVENT_CLASS_EPOW] = "epow-events",
    [EVENT_CLASS_HOT_PLUG] = "hot-plug-events",
    [EVENT_CLASS_IO] = "ibm,io-events",
};

struct sPAPREventSource {
    int irq;
    uint32_t mask;
    bool enabled;
};

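/*
 * Each event class the machine can raise is backed by a single interrupt
 * source; the helpers below allocate, register and look up those sources.
 */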
static sPAPREventSource *spapr_event_sources_new(void)
{
    return g_new0(sPAPREventSource, EVENT_CLASS_MAX);
}

static void spapr_event_sources_register(sPAPREventSource *event_sources,
                                         EventClassIndex index, int irq)
{
    /* we only support 1 irq per event class at the moment */
    g_assert(event_sources);
    g_assert(!event_sources[index].enabled);
    event_sources[index].irq = irq;
    event_sources[index].mask = EVENT_CLASS_MASK(index);
    event_sources[index].enabled = true;
}

static const sPAPREventSource *
spapr_event_sources_get_source(sPAPREventSource *event_sources,
                               EventClassIndex index)
{
    g_assert(index < EVENT_CLASS_MAX);
    g_assert(event_sources);

    return &event_sources[index];
}

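/*
 * Advertise the registered event sources to the guest by building the
 * "event-sources" device tree node, with one subnode and interrupt
 * specifier per enabled event class.
 */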
void spapr_dt_events(sPAPRMachineState *spapr, void *fdt)
{
    uint32_t irq_ranges[EVENT_CLASS_MAX * 2];
    int i, count = 0, event_sources;
    sPAPREventSource *events = spapr->event_sources;

    g_assert(events);

    _FDT(event_sources = fdt_add_subnode(fdt, 0, "event-sources"));

    for (i = 0, count = 0; i < EVENT_CLASS_MAX; i++) {
        int node_offset;
        uint32_t interrupts[2];
        const sPAPREventSource *source =
            spapr_event_sources_get_source(events, i);
        const char *source_name = event_names[i];

        if (!source->enabled) {
            continue;
        }

        spapr_dt_xics_irq(interrupts, source->irq, false);

        _FDT(node_offset = fdt_add_subnode(fdt, event_sources, source_name));
        _FDT(fdt_setprop(fdt, node_offset, "interrupts", interrupts,
                         sizeof(interrupts)));

        irq_ranges[count++] = interrupts[0];
        irq_ranges[count++] = cpu_to_be32(1);
    }

    _FDT((fdt_setprop(fdt, event_sources, "interrupt-controller", NULL, 0)));
    _FDT((fdt_setprop_cell(fdt, event_sources, "#interrupt-cells", 2)));
    _FDT((fdt_setprop(fdt, event_sources, "interrupt-ranges",
                      irq_ranges, count * sizeof(uint32_t))));
}

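/*
 * Map an RTAS log type to the interrupt source used to signal it.
 * Hotplug events use the dedicated source only when the guest has
 * negotiated OV5_HP_EVT; otherwise they fall back to the EPOW source.
 */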
static const sPAPREventSource *
rtas_event_log_to_source(sPAPRMachineState *spapr, int log_type)
{
    const sPAPREventSource *source;

    g_assert(spapr->event_sources);

    switch (log_type) {
    case RTAS_LOG_TYPE_HOTPLUG:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_HOT_PLUG);
        if (spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT)) {
            g_assert(source->enabled);
            break;
        }
        /* fall back to epow for legacy hotplug interrupt source */
    case RTAS_LOG_TYPE_EPOW:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_EPOW);
        break;
    default:
        source = NULL;
    }

    return source;
}

static int rtas_event_log_to_irq(sPAPRMachineState *spapr, int log_type)
{
    const sPAPREventSource *source;

    source = rtas_event_log_to_source(spapr, log_type);
    g_assert(source);
    g_assert(source->enabled);

    return source->irq;
}

static uint32_t spapr_event_log_entry_type(sPAPREventLogEntry *entry)
{
    return entry->summary & RTAS_LOG_TYPE_MASK;
}

static void rtas_event_log_queue(sPAPRMachineState *spapr,
                                 sPAPREventLogEntry *entry)
{
    QTAILQ_INSERT_TAIL(&spapr->pending_events, entry, next);
}

static sPAPREventLogEntry *rtas_event_log_dequeue(sPAPRMachineState *spapr,
                                                  uint32_t event_mask)
{
    sPAPREventLogEntry *entry = NULL;

    QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
        const sPAPREventSource *source =
            rtas_event_log_to_source(spapr,
                                     spapr_event_log_entry_type(entry));

        if (source->mask & event_mask) {
            break;
        }
    }

    if (entry) {
        QTAILQ_REMOVE(&spapr->pending_events, entry, next);
    }

    return entry;
}

static bool rtas_event_log_contains(uint32_t event_mask)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    sPAPREventLogEntry *entry = NULL;

    QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
        const sPAPREventSource *source =
            rtas_event_log_to_source(spapr,
                                     spapr_event_log_entry_type(entry));

        if (source->mask & event_mask) {
            return true;
        }
    }

    return false;
}

static uint32_t next_plid;

static void spapr_init_v6hdr(struct rtas_event_log_v6 *v6hdr)
{
    v6hdr->b0 = RTAS_LOG_V6_B0_VALID | RTAS_LOG_V6_B0_NEW_LOG
        | RTAS_LOG_V6_B0_BIGENDIAN;
    v6hdr->b2 = RTAS_LOG_V6_B2_POWERPC_FORMAT
        | RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT;
    v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM);
}

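/* Fill in the Main-A section: creation date/time in BCD (taken from the
 * virtual RTC), the creator id and a monotonically increasing platform
 * log id.
 */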
static void spapr_init_maina(struct rtas_event_log_v6_maina *maina,
                             int section_count)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    struct tm tm;
    int year;

    maina->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINA);
    maina->hdr.section_length = cpu_to_be16(sizeof(*maina));
    /* FIXME: section version, subtype and creator id? */
    spapr_rtc_read(&spapr->rtc, &tm, NULL);
    year = tm.tm_year + 1900;
    maina->creation_date = cpu_to_be32((to_bcd(year / 100) << 24)
                                       | (to_bcd(year % 100) << 16)
                                       | (to_bcd(tm.tm_mon + 1) << 8)
                                       | to_bcd(tm.tm_mday));
    maina->creation_time = cpu_to_be32((to_bcd(tm.tm_hour) << 24)
                                       | (to_bcd(tm.tm_min) << 16)
                                       | (to_bcd(tm.tm_sec) << 8));
    maina->creator_id = 'H'; /* Hypervisor */
    maina->section_count = section_count;
    maina->plid = next_plid++;
}

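/* Powerdown notifier: queue an EPOW "system shutdown" event and pulse the
 * EPOW interrupt so the guest fetches it via check-exception.
 */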
static void spapr_powerdown_req(Notifier *n, void *opaque)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    sPAPREventLogEntry *entry;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_epow *epow;
    struct epow_extended_log *new_epow;

    entry = g_new(sPAPREventLogEntry, 1);
    new_epow = g_malloc0(sizeof(*new_epow));
    entry->extended_log = new_epow;

    v6hdr = &new_epow->v6hdr;
    maina = &new_epow->maina;
    mainb = &new_epow->mainb;
    epow = &new_epow->epow;

    entry->summary = RTAS_LOG_VERSION_6
        | RTAS_LOG_SEVERITY_EVENT
        | RTAS_LOG_DISPOSITION_NOT_RECOVERED
        | RTAS_LOG_OPTIONAL_PART_PRESENT
        | RTAS_LOG_TYPE_EPOW;
    entry->extended_length = sizeof(*new_epow);

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(maina, 3 /* Main-A, Main-B and EPOW */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    /* FIXME: section version, subtype and creator id? */
    mainb->subsystem_id = 0xa0; /* External environment */
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0xd0; /* Normal shutdown */

    epow->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_EPOW);
    epow->hdr.section_length = cpu_to_be16(sizeof(*epow));
    epow->hdr.section_version = 2; /* includes extended modifier */
    /* FIXME: section subtype and creator id? */
    epow->sensor_value = RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN;
    epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL;
    epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC;

    rtas_event_log_queue(spapr, entry);

    qemu_irq_pulse(spapr_qirq(spapr,
                   rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_EPOW)));
}

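/* Build and queue a hotplug event log for the given DRC type and
 * identifier, then pulse the interrupt source associated with
 * RTAS_LOG_TYPE_HOTPLUG (the dedicated hotplug source, or EPOW for
 * legacy guests).
 */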
static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
                                    sPAPRDRConnectorType drc_type,
                                    union drc_identifier *drc_id)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    sPAPREventLogEntry *entry;
    struct hp_extended_log *new_hp;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_hp *hp;

    entry = g_new(sPAPREventLogEntry, 1);
    new_hp = g_malloc0(sizeof(struct hp_extended_log));
    entry->extended_log = new_hp;

    v6hdr = &new_hp->v6hdr;
    maina = &new_hp->maina;
    mainb = &new_hp->mainb;
    hp = &new_hp->hp;

    entry->summary = RTAS_LOG_VERSION_6
        | RTAS_LOG_SEVERITY_EVENT
        | RTAS_LOG_DISPOSITION_NOT_RECOVERED
        | RTAS_LOG_OPTIONAL_PART_PRESENT
        | RTAS_LOG_INITIATOR_HOTPLUG
        | RTAS_LOG_TYPE_HOTPLUG;
    entry->extended_length = sizeof(*new_hp);

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    mainb->subsystem_id = 0x80; /* External environment */
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0x00; /* Normal shutdown */

    hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG);
    hp->hdr.section_length = cpu_to_be16(sizeof(*hp));
    hp->hdr.section_version = 1; /* includes extended modifier */
    hp->hotplug_action = hp_action;
    hp->hotplug_identifier = hp_id;

    switch (drc_type) {
    case SPAPR_DR_CONNECTOR_TYPE_PCI:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_LMB:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_CPU:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_CPU;
        break;
    default:
        /* we shouldn't be signaling hotplug events for resources
         * that don't support them
         */
        g_assert(false);
        return;
    }

    if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) {
        hp->drc_id.count = cpu_to_be32(drc_id->count);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_INDEX) {
        hp->drc_id.index = cpu_to_be32(drc_id->index);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED) {
        /* we should not be using count_indexed value unless the guest
         * supports dedicated hotplug event source
         */
        g_assert(spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT));
        hp->drc_id.count_indexed.count =
            cpu_to_be32(drc_id->count_indexed.count);
        hp->drc_id.count_indexed.index =
            cpu_to_be32(drc_id->count_indexed.index);
    }

    rtas_event_log_queue(spapr, entry);

    qemu_irq_pulse(spapr_qirq(spapr,
                   rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_HOTPLUG)));
}

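/* Public helpers used by the hotplug code to signal add/remove requests
 * by DRC index, by count, or by count + starting index.
 */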
void spapr_hotplug_req_add_by_index(sPAPRDRConnector *drc)
{
    sPAPRDRConnectorType drc_type = spapr_drc_type(drc);
    union drc_identifier drc_id;

    drc_id.index = spapr_drc_index(drc);
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_index(sPAPRDRConnector *drc)
{
    sPAPRDRConnectorType drc_type = spapr_drc_type(drc);
    union drc_identifier drc_id;

    drc_id.index = spapr_drc_index(drc);
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}

void spapr_hotplug_req_add_by_count(sPAPRDRConnectorType drc_type,
                                    uint32_t count)
{
    union drc_identifier drc_id;

    drc_id.count = count;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_count(sPAPRDRConnectorType drc_type,
                                       uint32_t count)
{
    union drc_identifier drc_id;

    drc_id.count = count;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}

void spapr_hotplug_req_add_by_count_indexed(sPAPRDRConnectorType drc_type,
                                            uint32_t count, uint32_t index)
{
    union drc_identifier drc_id;

    drc_id.count_indexed.count = count;
    drc_id.count_indexed.index = index;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_count_indexed(sPAPRDRConnectorType drc_type,
                                               uint32_t count, uint32_t index)
{
    union drc_identifier drc_id;

    drc_id.count_indexed.count = count;
    drc_id.count_indexed.index = index;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}

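/* RTAS check-exception: dequeue the first pending event matching the
 * caller's mask, copy its header and extended log into the guest buffer,
 * and re-pulse the interrupt of any class that still has events queued.
 */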
static void check_exception(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            uint32_t token, uint32_t nargs,
                            target_ulong args,
                            uint32_t nret, target_ulong rets)
{
    uint32_t mask, buf, len, event_len;
    uint64_t xinfo;
    sPAPREventLogEntry *event;
    struct rtas_error_log header;
    int i;

    if ((nargs < 6) || (nargs > 7) || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    xinfo = rtas_ld(args, 1);
    mask = rtas_ld(args, 2);
    buf = rtas_ld(args, 4);
    len = rtas_ld(args, 5);
    if (nargs == 7) {
        xinfo |= (uint64_t)rtas_ld(args, 6) << 32;
    }

    event = rtas_event_log_dequeue(spapr, mask);
    if (!event) {
        goto out_no_events;
    }

    event_len = event->extended_length + sizeof(header);

    if (event_len < len) {
        len = event_len;
    }

    header.summary = cpu_to_be32(event->summary);
    header.extended_length = cpu_to_be32(event->extended_length);
    cpu_physical_memory_write(buf, &header, sizeof(header));
    cpu_physical_memory_write(buf + sizeof(header), event->extended_log,
                              event->extended_length);
    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    g_free(event->extended_log);
    g_free(event);

    /* according to PAPR+, the IRQ must be left asserted, or re-asserted, if
     * there are still pending events to be fetched via check-exception. We
     * do the latter here, since our code relies on edge-triggered
     * interrupts.
     */
    for (i = 0; i < EVENT_CLASS_MAX; i++) {
        if (rtas_event_log_contains(EVENT_CLASS_MASK(i))) {
            const sPAPREventSource *source =
                spapr_event_sources_get_source(spapr->event_sources, i);

            g_assert(source->enabled);
            qemu_irq_pulse(spapr_qirq(spapr, source->irq));
        }
    }

    return;

out_no_events:
    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}

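/* RTAS event-scan: nothing is ever reported through event-scan in this
 * implementation, so simply return "no errors found".
 */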
static void event_scan(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                       uint32_t token, uint32_t nargs,
                       target_ulong args,
                       uint32_t nret, target_ulong rets)
{
    if (nargs != 4 || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }
    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}

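/* Drop any events still queued for delivery to the guest. */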
void spapr_clear_pending_events(sPAPRMachineState *spapr)
{
    sPAPREventLogEntry *entry = NULL, *next_entry;

    QTAILQ_FOREACH_SAFE(entry, &spapr->pending_events, next, next_entry) {
        QTAILQ_REMOVE(&spapr->pending_events, entry, next);
        g_free(entry->extended_log);
        g_free(entry);
    }
}

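/* Claim the EPOW interrupt (and the dedicated hotplug interrupt when the
 * machine supports it), register the event sources, and hook up the RTAS
 * check-exception and event-scan calls plus the powerdown notifier.
 */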
void spapr_events_init(sPAPRMachineState *spapr)
{
    int epow_irq = SPAPR_IRQ_EPOW;

    if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        epow_irq = spapr_irq_findone(spapr, &error_fatal);
    }

    spapr_irq_claim(spapr, epow_irq, false, &error_fatal);

    QTAILQ_INIT(&spapr->pending_events);

    spapr->event_sources = spapr_event_sources_new();

    spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_EPOW,
                                 epow_irq);

    /* NOTE: if machine supports modern/dedicated hotplug event source,
     * we add it to the device-tree unconditionally. This means we may
     * have cases where the source is enabled in QEMU, but unused by the
     * guest because it does not support modern hotplug events, so we
     * take care to rely on checking for negotiation of OV5_HP_EVT option
     * before attempting to use it to signal events, rather than simply
     * checking that it's enabled.
     */
    if (spapr->use_hotplug_event_source) {
        int hp_irq = SPAPR_IRQ_HOTPLUG;

        if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
            hp_irq = spapr_irq_findone(spapr, &error_fatal);
        }

        spapr_irq_claim(spapr, hp_irq, false, &error_fatal);

        spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_HOT_PLUG,
                                     hp_irq);
    }

    spapr->epow_notifier.notify = spapr_powerdown_req;
    qemu_register_powerdown_notifier(&spapr->epow_notifier);
    spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception",
                        check_exception);
    spapr_rtas_register(RTAS_EVENT_SCAN, "event-scan", event_scan);
}