hw/ppc/spapr_events.c
/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * RTAS events handling
 *
 * Copyright (c) 2012 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "sysemu/sysemu.h"
#include "hw/qdev.h"
#include "sysemu/device_tree.h"

#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/pci/pci.h"
#include "hw/irq.h"
#include "hw/pci-host/spapr.h"
#include "hw/ppc/spapr_drc.h"
#include "qemu/help_option.h"
#include "qemu/bcd.h"
#include "hw/ppc/spapr_ovec.h"
#include <libfdt.h>

#define RTAS_LOG_VERSION_MASK 0xff000000
#define RTAS_LOG_VERSION_6 0x06000000
#define RTAS_LOG_SEVERITY_MASK 0x00e00000
#define RTAS_LOG_SEVERITY_ALREADY_REPORTED 0x00c00000
#define RTAS_LOG_SEVERITY_FATAL 0x00a00000
#define RTAS_LOG_SEVERITY_ERROR 0x00800000
#define RTAS_LOG_SEVERITY_ERROR_SYNC 0x00600000
#define RTAS_LOG_SEVERITY_WARNING 0x00400000
#define RTAS_LOG_SEVERITY_EVENT 0x00200000
#define RTAS_LOG_SEVERITY_NO_ERROR 0x00000000
#define RTAS_LOG_DISPOSITION_MASK 0x00180000
#define RTAS_LOG_DISPOSITION_FULLY_RECOVERED 0x00000000
#define RTAS_LOG_DISPOSITION_LIMITED_RECOVERY 0x00080000
#define RTAS_LOG_DISPOSITION_NOT_RECOVERED 0x00100000
#define RTAS_LOG_OPTIONAL_PART_PRESENT 0x00040000
#define RTAS_LOG_INITIATOR_MASK 0x0000f000
#define RTAS_LOG_INITIATOR_UNKNOWN 0x00000000
#define RTAS_LOG_INITIATOR_CPU 0x00001000
#define RTAS_LOG_INITIATOR_PCI 0x00002000
#define RTAS_LOG_INITIATOR_MEMORY 0x00004000
#define RTAS_LOG_INITIATOR_HOTPLUG 0x00006000
#define RTAS_LOG_TARGET_MASK 0x00000f00
#define RTAS_LOG_TARGET_UNKNOWN 0x00000000
#define RTAS_LOG_TARGET_CPU 0x00000100
#define RTAS_LOG_TARGET_PCI 0x00000200
#define RTAS_LOG_TARGET_MEMORY 0x00000400
#define RTAS_LOG_TARGET_HOTPLUG 0x00000600
#define RTAS_LOG_TYPE_MASK 0x000000ff
#define RTAS_LOG_TYPE_OTHER 0x00000000
#define RTAS_LOG_TYPE_RETRY 0x00000001
#define RTAS_LOG_TYPE_TCE_ERR 0x00000002
#define RTAS_LOG_TYPE_INTERN_DEV_FAIL 0x00000003
#define RTAS_LOG_TYPE_TIMEOUT 0x00000004
#define RTAS_LOG_TYPE_DATA_PARITY 0x00000005
#define RTAS_LOG_TYPE_ADDR_PARITY 0x00000006
#define RTAS_LOG_TYPE_CACHE_PARITY 0x00000007
#define RTAS_LOG_TYPE_ADDR_INVALID 0x00000008
#define RTAS_LOG_TYPE_ECC_UNCORR 0x00000009
#define RTAS_LOG_TYPE_ECC_CORR 0x0000000a
#define RTAS_LOG_TYPE_EPOW 0x00000040
#define RTAS_LOG_TYPE_HOTPLUG 0x000000e5
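
/* Fixed-size header that precedes every RTAS error log returned to the
 * guest by check-exception; the extended log, if any, follows it.
 */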
struct rtas_error_log {
    uint32_t summary;
    uint32_t extended_length;
} QEMU_PACKED;
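
/* Main header of a version 6 extended error log */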
struct rtas_event_log_v6 {
    uint8_t b0;
#define RTAS_LOG_V6_B0_VALID 0x80
#define RTAS_LOG_V6_B0_UNRECOVERABLE_ERROR 0x40
#define RTAS_LOG_V6_B0_RECOVERABLE_ERROR 0x20
#define RTAS_LOG_V6_B0_DEGRADED_OPERATION 0x10
#define RTAS_LOG_V6_B0_PREDICTIVE_ERROR 0x08
#define RTAS_LOG_V6_B0_NEW_LOG 0x04
#define RTAS_LOG_V6_B0_BIGENDIAN 0x02
    uint8_t _resv1;
    uint8_t b2;
#define RTAS_LOG_V6_B2_POWERPC_FORMAT 0x80
#define RTAS_LOG_V6_B2_LOG_FORMAT_MASK 0x0f
#define RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT 0x0e
    uint8_t _resv2[9];
    uint32_t company;
#define RTAS_LOG_V6_COMPANY_IBM 0x49424d00 /* IBM<null> */
} QEMU_PACKED;

struct rtas_event_log_v6_section_header {
    uint16_t section_id;
    uint16_t section_length;
    uint8_t section_version;
    uint8_t section_subtype;
    uint16_t creator_component_id;
} QEMU_PACKED;

struct rtas_event_log_v6_maina {
#define RTAS_LOG_V6_SECTION_ID_MAINA 0x5048 /* PH */
    struct rtas_event_log_v6_section_header hdr;
    uint32_t creation_date; /* BCD: YYYYMMDD */
    uint32_t creation_time; /* BCD: HHMMSS00 */
    uint8_t _platform1[8];
    char creator_id;
    uint8_t _resv1[2];
    uint8_t section_count;
    uint8_t _resv2[4];
    uint8_t _platform2[8];
    uint32_t plid;
    uint8_t _platform3[4];
} QEMU_PACKED;

struct rtas_event_log_v6_mainb {
#define RTAS_LOG_V6_SECTION_ID_MAINB 0x5548 /* UH */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t subsystem_id;
    uint8_t _platform1;
    uint8_t event_severity;
    uint8_t event_subtype;
    uint8_t _platform2[4];
    uint8_t _resv1[2];
    uint16_t action_flags;
    uint8_t _resv2[4];
} QEMU_PACKED;

struct rtas_event_log_v6_epow {
#define RTAS_LOG_V6_SECTION_ID_EPOW 0x4550 /* EP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t sensor_value;
#define RTAS_LOG_V6_EPOW_ACTION_RESET 0
#define RTAS_LOG_V6_EPOW_ACTION_WARN_COOLING 1
#define RTAS_LOG_V6_EPOW_ACTION_WARN_POWER 2
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN 3
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_HALT 4
#define RTAS_LOG_V6_EPOW_ACTION_MAIN_ENCLOSURE 5
#define RTAS_LOG_V6_EPOW_ACTION_POWER_OFF 7
    uint8_t event_modifier;
#define RTAS_LOG_V6_EPOW_MODIFIER_NORMAL 1
#define RTAS_LOG_V6_EPOW_MODIFIER_ON_UPS 2
#define RTAS_LOG_V6_EPOW_MODIFIER_CRITICAL 3
#define RTAS_LOG_V6_EPOW_MODIFIER_TEMPERATURE 4
    uint8_t extended_modifier;
#define RTAS_LOG_V6_EPOW_XMODIFIER_SYSTEM_WIDE 0
#define RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC 1
    uint8_t _resv;
    uint64_t reason_code;
} QEMU_PACKED;

struct epow_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_epow epow;
} QEMU_PACKED;

union drc_identifier {
    uint32_t index;
    uint32_t count;
    struct {
        uint32_t count;
        uint32_t index;
    } count_indexed;
    char name[1];
} QEMU_PACKED;

struct rtas_event_log_v6_hp {
#define RTAS_LOG_V6_SECTION_ID_HOTPLUG 0x4850 /* HP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t hotplug_type;
#define RTAS_LOG_V6_HP_TYPE_CPU 1
#define RTAS_LOG_V6_HP_TYPE_MEMORY 2
#define RTAS_LOG_V6_HP_TYPE_SLOT 3
#define RTAS_LOG_V6_HP_TYPE_PHB 4
#define RTAS_LOG_V6_HP_TYPE_PCI 5
    uint8_t hotplug_action;
#define RTAS_LOG_V6_HP_ACTION_ADD 1
#define RTAS_LOG_V6_HP_ACTION_REMOVE 2
    uint8_t hotplug_identifier;
#define RTAS_LOG_V6_HP_ID_DRC_NAME 1
#define RTAS_LOG_V6_HP_ID_DRC_INDEX 2
#define RTAS_LOG_V6_HP_ID_DRC_COUNT 3
#define RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED 4
    uint8_t reserved;
    union drc_identifier drc_id;
} QEMU_PACKED;

struct hp_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_hp hp;
} QEMU_PACKED;

typedef enum EventClass {
    EVENT_CLASS_INTERNAL_ERRORS = 0,
    EVENT_CLASS_EPOW = 1,
    EVENT_CLASS_RESERVED = 2,
    EVENT_CLASS_HOT_PLUG = 3,
    EVENT_CLASS_IO = 4,
    EVENT_CLASS_MAX
} EventClassIndex;
#define EVENT_CLASS_MASK(index) (1 << (31 - index))
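
/* Device tree node names for each event source, see spapr_dt_events() */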
static const char * const event_names[EVENT_CLASS_MAX] = {
    [EVENT_CLASS_INTERNAL_ERRORS] = "internal-errors",
    [EVENT_CLASS_EPOW] = "epow-events",
    [EVENT_CLASS_HOT_PLUG] = "hot-plug-events",
    [EVENT_CLASS_IO] = "ibm,io-events",
};
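
/* Per-event-class interrupt source state */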
struct SpaprEventSource {
    int irq;
    uint32_t mask;
    bool enabled;
};

static SpaprEventSource *spapr_event_sources_new(void)
{
    return g_new0(SpaprEventSource, EVENT_CLASS_MAX);
}

static void spapr_event_sources_register(SpaprEventSource *event_sources,
                                         EventClassIndex index, int irq)
{
    /* we only support 1 irq per event class at the moment */
    g_assert(event_sources);
    g_assert(!event_sources[index].enabled);
    event_sources[index].irq = irq;
    event_sources[index].mask = EVENT_CLASS_MASK(index);
    event_sources[index].enabled = true;
}

static const SpaprEventSource *
spapr_event_sources_get_source(SpaprEventSource *event_sources,
                               EventClassIndex index)
{
    g_assert(index < EVENT_CLASS_MAX);
    g_assert(event_sources);

    return &event_sources[index];
}
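
/* Build the "event-sources" node of the guest device tree, with one
 * subnode and interrupt specifier per enabled event class.
 */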
void spapr_dt_events(SpaprMachineState *spapr, void *fdt)
{
    uint32_t irq_ranges[EVENT_CLASS_MAX * 2];
    int i, count = 0, event_sources;
    SpaprEventSource *events = spapr->event_sources;

    g_assert(events);

    _FDT(event_sources = fdt_add_subnode(fdt, 0, "event-sources"));

    for (i = 0, count = 0; i < EVENT_CLASS_MAX; i++) {
        int node_offset;
        uint32_t interrupts[2];
        const SpaprEventSource *source =
            spapr_event_sources_get_source(events, i);
        const char *source_name = event_names[i];

        if (!source->enabled) {
            continue;
        }

        spapr_dt_irq(interrupts, source->irq, false);

        _FDT(node_offset = fdt_add_subnode(fdt, event_sources, source_name));
        _FDT(fdt_setprop(fdt, node_offset, "interrupts", interrupts,
                         sizeof(interrupts)));

        irq_ranges[count++] = interrupts[0];
        irq_ranges[count++] = cpu_to_be32(1);
    }

    _FDT((fdt_setprop(fdt, event_sources, "interrupt-controller", NULL, 0)));
    _FDT((fdt_setprop_cell(fdt, event_sources, "#interrupt-cells", 2)));
    _FDT((fdt_setprop(fdt, event_sources, "interrupt-ranges",
                      irq_ranges, count * sizeof(uint32_t))));
}
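
/* Map an RTAS log type to the event source that signals it. Hotplug
 * events use the dedicated source only if the guest negotiated
 * OV5_HP_EVT; otherwise they fall back to the EPOW source.
 */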
static const SpaprEventSource *
rtas_event_log_to_source(SpaprMachineState *spapr, int log_type)
{
    const SpaprEventSource *source;

    g_assert(spapr->event_sources);

    switch (log_type) {
    case RTAS_LOG_TYPE_HOTPLUG:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_HOT_PLUG);
        if (spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT)) {
            g_assert(source->enabled);
            break;
        }
        /* fall back to epow for legacy hotplug interrupt source */
    case RTAS_LOG_TYPE_EPOW:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_EPOW);
        break;
    default:
        source = NULL;
    }

    return source;
}

static int rtas_event_log_to_irq(SpaprMachineState *spapr, int log_type)
{
    const SpaprEventSource *source;

    source = rtas_event_log_to_source(spapr, log_type);
    g_assert(source);
    g_assert(source->enabled);

    return source->irq;
}

static uint32_t spapr_event_log_entry_type(SpaprEventLogEntry *entry)
{
    return entry->summary & RTAS_LOG_TYPE_MASK;
}
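
/* Pending events are queued on spapr->pending_events until the guest
 * fetches them via check-exception.
 */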
static void rtas_event_log_queue(SpaprMachineState *spapr,
                                 SpaprEventLogEntry *entry)
{
    QTAILQ_INSERT_TAIL(&spapr->pending_events, entry, next);
}

static SpaprEventLogEntry *rtas_event_log_dequeue(SpaprMachineState *spapr,
                                                  uint32_t event_mask)
{
    SpaprEventLogEntry *entry = NULL;

    QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
        const SpaprEventSource *source =
            rtas_event_log_to_source(spapr,
                                     spapr_event_log_entry_type(entry));

        if (source->mask & event_mask) {
            break;
        }
    }

    if (entry) {
        QTAILQ_REMOVE(&spapr->pending_events, entry, next);
    }

    return entry;
}

static bool rtas_event_log_contains(uint32_t event_mask)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprEventLogEntry *entry = NULL;

    QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
        const SpaprEventSource *source =
            rtas_event_log_to_source(spapr,
                                     spapr_event_log_entry_type(entry));

        if (source->mask & event_mask) {
            return true;
        }
    }

    return false;
}
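
/* platform log ID: incremented for each log we generate (see maina->plid) */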
static uint32_t next_plid;

static void spapr_init_v6hdr(struct rtas_event_log_v6 *v6hdr)
{
    v6hdr->b0 = RTAS_LOG_V6_B0_VALID | RTAS_LOG_V6_B0_NEW_LOG
        | RTAS_LOG_V6_B0_BIGENDIAN;
    v6hdr->b2 = RTAS_LOG_V6_B2_POWERPC_FORMAT
        | RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT;
    v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM);
}

static void spapr_init_maina(struct rtas_event_log_v6_maina *maina,
                             int section_count)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    struct tm tm;
    int year;

    maina->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINA);
    maina->hdr.section_length = cpu_to_be16(sizeof(*maina));
    /* FIXME: section version, subtype and creator id? */
    spapr_rtc_read(&spapr->rtc, &tm, NULL);
    year = tm.tm_year + 1900;
    maina->creation_date = cpu_to_be32((to_bcd(year / 100) << 24)
                                       | (to_bcd(year % 100) << 16)
                                       | (to_bcd(tm.tm_mon + 1) << 8)
                                       | to_bcd(tm.tm_mday));
    maina->creation_time = cpu_to_be32((to_bcd(tm.tm_hour) << 24)
                                       | (to_bcd(tm.tm_min) << 16)
                                       | (to_bcd(tm.tm_sec) << 8));
    maina->creator_id = 'H'; /* Hypervisor */
    maina->section_count = section_count;
    maina->plid = next_plid++;
}
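
/* Powerdown request notifier: queue an EPOW (environmental/power warning)
 * "system shutdown" event and pulse the EPOW interrupt so the guest
 * fetches it via check-exception.
 */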
static void spapr_powerdown_req(Notifier *n, void *opaque)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprEventLogEntry *entry;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_epow *epow;
    struct epow_extended_log *new_epow;

    entry = g_new(SpaprEventLogEntry, 1);
    new_epow = g_malloc0(sizeof(*new_epow));
    entry->extended_log = new_epow;

    v6hdr = &new_epow->v6hdr;
    maina = &new_epow->maina;
    mainb = &new_epow->mainb;
    epow = &new_epow->epow;

    entry->summary = RTAS_LOG_VERSION_6
        | RTAS_LOG_SEVERITY_EVENT
        | RTAS_LOG_DISPOSITION_NOT_RECOVERED
        | RTAS_LOG_OPTIONAL_PART_PRESENT
        | RTAS_LOG_TYPE_EPOW;
    entry->extended_length = sizeof(*new_epow);

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(maina, 3 /* Main-A, Main-B and EPOW */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    /* FIXME: section version, subtype and creator id? */
    mainb->subsystem_id = 0xa0; /* External environment */
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0xd0; /* Normal shutdown */

    epow->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_EPOW);
    epow->hdr.section_length = cpu_to_be16(sizeof(*epow));
    epow->hdr.section_version = 2; /* includes extended modifier */
    /* FIXME: section subtype and creator id? */
    epow->sensor_value = RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN;
    epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL;
    epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC;

    rtas_event_log_queue(spapr, entry);

    qemu_irq_pulse(spapr_qirq(spapr,
                   rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_EPOW)));
}
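
/* Build, queue and signal a hotplug event log for the given DRC type,
 * action and identifier.
 */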
static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
                                    SpaprDrcType drc_type,
                                    union drc_identifier *drc_id)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprEventLogEntry *entry;
    struct hp_extended_log *new_hp;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_hp *hp;

    entry = g_new(SpaprEventLogEntry, 1);
    new_hp = g_malloc0(sizeof(struct hp_extended_log));
    entry->extended_log = new_hp;

    v6hdr = &new_hp->v6hdr;
    maina = &new_hp->maina;
    mainb = &new_hp->mainb;
    hp = &new_hp->hp;

    entry->summary = RTAS_LOG_VERSION_6
        | RTAS_LOG_SEVERITY_EVENT
        | RTAS_LOG_DISPOSITION_NOT_RECOVERED
        | RTAS_LOG_OPTIONAL_PART_PRESENT
        | RTAS_LOG_INITIATOR_HOTPLUG
        | RTAS_LOG_TYPE_HOTPLUG;
    entry->extended_length = sizeof(*new_hp);

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    mainb->subsystem_id = 0x80; /* External environment */
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0x00; /* Normal shutdown */

    hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG);
    hp->hdr.section_length = cpu_to_be16(sizeof(*hp));
    hp->hdr.section_version = 1; /* includes extended modifier */
    hp->hotplug_action = hp_action;
    hp->hotplug_identifier = hp_id;

    switch (drc_type) {
    case SPAPR_DR_CONNECTOR_TYPE_PCI:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_LMB:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_CPU:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_CPU;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_PHB:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PHB;
        break;
    default:
        /* we shouldn't be signaling hotplug events for resources
         * that don't support them
         */
        g_assert(false);
        return;
    }

    if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) {
        hp->drc_id.count = cpu_to_be32(drc_id->count);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_INDEX) {
        hp->drc_id.index = cpu_to_be32(drc_id->index);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED) {
        /* we should not be using count_indexed value unless the guest
         * supports dedicated hotplug event source
         */
        g_assert(spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT));
        hp->drc_id.count_indexed.count =
            cpu_to_be32(drc_id->count_indexed.count);
        hp->drc_id.count_indexed.index =
            cpu_to_be32(drc_id->count_indexed.index);
    }

    rtas_event_log_queue(spapr, entry);

    qemu_irq_pulse(spapr_qirq(spapr,
                   rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_HOTPLUG)));
}
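
/* Hotplug notification helpers: identify the resource by DRC index,
 * by count, or by count + starting index.
 */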
void spapr_hotplug_req_add_by_index(SpaprDrc *drc)
{
    SpaprDrcType drc_type = spapr_drc_type(drc);
    union drc_identifier drc_id;

    drc_id.index = spapr_drc_index(drc);
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_index(SpaprDrc *drc)
{
    SpaprDrcType drc_type = spapr_drc_type(drc);
    union drc_identifier drc_id;

    drc_id.index = spapr_drc_index(drc);
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}

void spapr_hotplug_req_add_by_count(SpaprDrcType drc_type,
                                    uint32_t count)
{
    union drc_identifier drc_id;

    drc_id.count = count;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_count(SpaprDrcType drc_type,
                                       uint32_t count)
{
    union drc_identifier drc_id;

    drc_id.count = count;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}

void spapr_hotplug_req_add_by_count_indexed(SpaprDrcType drc_type,
                                            uint32_t count, uint32_t index)
{
    union drc_identifier drc_id;

    drc_id.count_indexed.count = count;
    drc_id.count_indexed.index = index;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
                            RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}

void spapr_hotplug_req_remove_by_count_indexed(SpaprDrcType drc_type,
                                               uint32_t count, uint32_t index)
{
    union drc_identifier drc_id;

    drc_id.count_indexed.count = count;
    drc_id.count_indexed.index = index;
    spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
                            RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}
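
/* RTAS check-exception: copy the first pending event matching the mask
 * into the guest buffer (fixed header followed by the extended log) and
 * re-pulse the interrupt of any class that still has events queued.
 */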
static void check_exception(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            uint32_t token, uint32_t nargs,
                            target_ulong args,
                            uint32_t nret, target_ulong rets)
{
    uint32_t mask, buf, len, event_len;
    uint64_t xinfo;
    SpaprEventLogEntry *event;
    struct rtas_error_log header;
    int i;

    if ((nargs < 6) || (nargs > 7) || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    xinfo = rtas_ld(args, 1);
    mask = rtas_ld(args, 2);
    buf = rtas_ld(args, 4);
    len = rtas_ld(args, 5);
    if (nargs == 7) {
        xinfo |= (uint64_t)rtas_ld(args, 6) << 32;
    }

    event = rtas_event_log_dequeue(spapr, mask);
    if (!event) {
        goto out_no_events;
    }

    event_len = event->extended_length + sizeof(header);

    if (event_len < len) {
        len = event_len;
    }

    header.summary = cpu_to_be32(event->summary);
    header.extended_length = cpu_to_be32(event->extended_length);
    cpu_physical_memory_write(buf, &header, sizeof(header));
    cpu_physical_memory_write(buf + sizeof(header), event->extended_log,
                              event->extended_length);
    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    g_free(event->extended_log);
    g_free(event);

    /* according to PAPR+, the IRQ must be left asserted, or re-asserted, if
     * there are still pending events to be fetched via check-exception. We
     * do the latter here, since our code relies on edge-triggered
     * interrupts.
     */
    for (i = 0; i < EVENT_CLASS_MAX; i++) {
        if (rtas_event_log_contains(EVENT_CLASS_MASK(i))) {
            const SpaprEventSource *source =
                spapr_event_sources_get_source(spapr->event_sources, i);

            g_assert(source->enabled);
            qemu_irq_pulse(spapr_qirq(spapr, source->irq));
        }
    }

    return;

out_no_events:
    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}
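
/* RTAS event-scan: we never report events this way, they are only
 * surfaced through check-exception.
 */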
static void event_scan(PowerPCCPU *cpu, SpaprMachineState *spapr,
                       uint32_t token, uint32_t nargs,
                       target_ulong args,
                       uint32_t nret, target_ulong rets)
{
    if (nargs != 4 || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }
    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}
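
/* Drop and free all events still waiting to be fetched by the guest */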
void spapr_clear_pending_events(SpaprMachineState *spapr)
{
    SpaprEventLogEntry *entry = NULL, *next_entry;

    QTAILQ_FOREACH_SAFE(entry, &spapr->pending_events, next, next_entry) {
        QTAILQ_REMOVE(&spapr->pending_events, entry, next);
        g_free(entry->extended_log);
        g_free(entry);
    }
}
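
/* Claim and register the interrupt sources for the supported event
 * classes, hook up the powerdown notifier and register the RTAS calls.
 */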
void spapr_events_init(SpaprMachineState *spapr)
{
    int epow_irq = SPAPR_IRQ_EPOW;

    if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        epow_irq = spapr_irq_findone(spapr, &error_fatal);
    }

    spapr_irq_claim(spapr, epow_irq, false, &error_fatal);

    QTAILQ_INIT(&spapr->pending_events);

    spapr->event_sources = spapr_event_sources_new();

    spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_EPOW,
                                 epow_irq);

    /* NOTE: if machine supports modern/dedicated hotplug event source,
     * we add it to the device-tree unconditionally. This means we may
     * have cases where the source is enabled in QEMU, but unused by the
     * guest because it does not support modern hotplug events, so we
     * take care to rely on checking for negotiation of OV5_HP_EVT option
     * before attempting to use it to signal events, rather than simply
     * checking that it's enabled.
     */
    if (spapr->use_hotplug_event_source) {
        int hp_irq = SPAPR_IRQ_HOTPLUG;

        if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
            hp_irq = spapr_irq_findone(spapr, &error_fatal);
        }

        spapr_irq_claim(spapr, hp_irq, false, &error_fatal);

        spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_HOT_PLUG,
                                     hp_irq);
    }

    spapr->epow_notifier.notify = spapr_powerdown_req;
    qemu_register_powerdown_notifier(&spapr->epow_notifier);
    spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception",
                        check_exception);
    spapr_rtas_register(RTAS_EVENT_SCAN, "event-scan", event_scan);
}