spapr/xive: introduce a XIVE interrupt controller
hw/intc/spapr_xive.c
/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "monitor/monitor.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"

/*
 * XIVE Virtualization Controller BAR and Thread Management BAR that we
 * use for the ESB pages and the TIMA pages
 */
#define SPAPR_XIVE_VC_BASE   0x0006010000000000ull
#define SPAPR_XIVE_TM_BASE   0x0006030203180000ull
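
/*
 * Note: these are only default guest physical addresses; the machine code
 * creating this device may override them through the "vc-base" and
 * "tm-base" properties defined at the end of this file.
 */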

/*
 * On sPAPR machines, use a simplified output for the XIVE END
 * structure dumping only the information related to the OS EQ.
 */
static void spapr_xive_end_pic_print_info(sPAPRXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    monitor_printf(mon, "%3d/%d % 6d/%5d ^%d", nvt,
                   priority, qindex, qentries, qgen);

    xive_end_queue_pic_print_info(end, 6, mon);
    monitor_printf(mon, "]");
}

void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    monitor_printf(mon, "  LSIN         PQ    EISN     CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}

static void spapr_xive_map_mmio(sPAPRXive *xive)
{
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
}

static void spapr_xive_end_reset(XiveEND *end)
{
    memset(end, 0, sizeof(*end));

    /* switch off the escalation and notification ESBs */
    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
}

static void spapr_xive_reset(void *dev)
{
    sPAPRXive *xive = SPAPR_XIVE(dev);
    int i;

    /*
     * The XiveSource has its own reset handler, which masks off all
     * IRQs (!P|Q)
     */

    /* Mask all valid EASs in the IRQ number space. */
    for (i = 0; i < xive->nr_irqs; i++) {
        XiveEAS *eas = &xive->eat[i];
        if (xive_eas_is_valid(eas)) {
            eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        } else {
            eas->w = 0;
        }
    }

    /* Clear all ENDs */
    for (i = 0; i < xive->nr_ends; i++) {
        spapr_xive_end_reset(&xive->endt[i]);
    }
}

static void spapr_xive_instance_init(Object *obj)
{
    sPAPRXive *xive = SPAPR_XIVE(obj);

    object_initialize(&xive->source, sizeof(xive->source), TYPE_XIVE_SOURCE);
    object_property_add_child(obj, "source", OBJECT(&xive->source), NULL);

    object_initialize(&xive->end_source, sizeof(xive->end_source),
                      TYPE_XIVE_END_SOURCE);
    object_property_add_child(obj, "end_source", OBJECT(&xive->end_source),
                              NULL);
}

static void spapr_xive_realize(DeviceState *dev, Error **errp)
{
    sPAPRXive *xive = SPAPR_XIVE(dev);
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    if (!xive->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (!xive->nr_ends) {
        error_setg(errp, "Number of interrupt ENDs needs to be greater than 0");
        return;
    }

    /*
     * Initialize the internal sources, for IPIs and virtual devices.
     */
    object_property_set_int(OBJECT(xsrc), xive->nr_irqs, "nr-irqs",
                            &error_fatal);
    object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * Initialize the END ESB source
     */
    object_property_set_int(OBJECT(end_xsrc), xive->nr_irqs, "nr-ends",
                            &error_fatal);
    object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Set the mapping address of the END ESB pages after the source ESBs */
    xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;

    /*
     * Allocate the routing tables
     */
    xive->eat = g_new0(XiveEAS, xive->nr_irqs);
    xive->endt = g_new0(XiveEND, xive->nr_ends);

    /* TIMA initialization */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive,
                          "xive.tima", 4ull << TM_SHIFT);

    /* Define all XIVE MMIO regions on SysBus */
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);

    /* Map all regions */
    spapr_xive_map_mmio(xive);

    qemu_register_reset(spapr_xive_reset, dev);
}
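
/*
 * For reference only: a minimal sketch of how the sPAPR machine code might
 * instantiate this device. The qdev calls and variable names below are
 * illustrative and not taken from this file.
 *
 *    DeviceState *dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
 *
 *    qdev_prop_set_uint32(dev, "nr-irqs", nr_irqs);
 *    qdev_prop_set_uint32(dev, "nr-ends", nr_ends);
 *    qdev_init_nofail(dev);
 */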

static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
                              uint32_t eas_idx, XiveEAS *eas)
{
    sPAPRXive *xive = SPAPR_XIVE(xrtr);

    if (eas_idx >= xive->nr_irqs) {
        return -1;
    }

    *eas = xive->eat[eas_idx];
    return 0;
}

static int spapr_xive_get_end(XiveRouter *xrtr,
                              uint8_t end_blk, uint32_t end_idx, XiveEND *end)
{
    sPAPRXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
    return 0;
}

static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
                                uint32_t end_idx, XiveEND *end,
                                uint8_t word_number)
{
    sPAPRXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
    return 0;
}

static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, sPAPRXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, sPAPRXive, nr_irqs,
                                             vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, sPAPRXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
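
/*
 * Migration note: VMSTATE_UINT32_EQUAL above makes an incoming migration
 * fail if the destination device was not configured with the same
 * "nr-irqs" value, and the full EAS and END tables are part of the
 * migrated device state.
 */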

static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", sPAPRXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", sPAPRXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", sPAPRXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", sPAPRXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_END_OF_LIST(),
};

static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);

    dc->desc = "sPAPR XIVE Interrupt Controller";
    dc->props = spapr_xive_properties;
    dc->realize = spapr_xive_realize;
    dc->vmsd = &vmstate_spapr_xive;

    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
}

static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(sPAPRXive),
    .class_init = spapr_xive_class_init,
};

static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)

bool spapr_xive_irq_claim(sPAPRXive *xive, uint32_t lisn, bool lsi)
{
    XiveSource *xsrc = &xive->source;

    if (lisn >= xive->nr_irqs) {
        return false;
    }

    xive->eat[lisn].w |= cpu_to_be64(EAS_VALID);
    xive_source_irq_set(xsrc, lisn, lsi);
    return true;
}

bool spapr_xive_irq_free(sPAPRXive *xive, uint32_t lisn)
{
    XiveSource *xsrc = &xive->source;

    if (lisn >= xive->nr_irqs) {
        return false;
    }

    xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
    xive_source_irq_set(xsrc, lisn, false);
    return true;
}

qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn)
{
    XiveSource *xsrc = &xive->source;

    if (lisn >= xive->nr_irqs) {
        return NULL;
    }

    /* The sPAPR machine/device should have claimed the IRQ before */
    assert(xive_eas_is_valid(&xive->eat[lisn]));

    return xive_source_qirq(xsrc, lisn);
}
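
/*
 * For reference only: a minimal sketch of how a caller in the sPAPR machine
 * code could use the three helpers above. The calling context and error
 * handling are illustrative, not taken from this file: claim a LISN first,
 * then drive it through its qemu_irq line, and release it when done.
 *
 *    if (!spapr_xive_irq_claim(xive, lisn, false)) {
 *        error_setg(errp, "LISN %u is out of range", lisn);
 *        return;
 *    }
 *
 *    qemu_set_irq(spapr_xive_qirq(xive, lisn), 1);    assert the source
 *    qemu_set_irq(spapr_xive_qirq(xive, lisn), 0);    deassert it
 *
 *    spapr_xive_irq_free(xive, lisn);
 */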