hw/intc/arm_gicv3_its: Factor out "is intid a valid LPI ID?"
/*
 * ITS emulation for a GICv3-based system
 *
 * Copyright Linaro.org 2021
 *
 * Authors:
 *  Shashi Mallela <shashi.mallela@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
#include "qom/object.h"
#include "qapi/error.h"

typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    void (*parent_reset)(DeviceState *dev);
};

/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;

typedef struct DTEntry {
    bool valid;
    unsigned size;
    uint64_t ittaddr;
} DTEntry;

typedef struct CTEntry {
    bool valid;
    uint32_t rdbase;
} CTEntry;

typedef struct ITEntry {
    bool valid;
    int inttype;
    uint32_t intid;
    uint32_t doorbell;
    uint32_t icid;
    uint32_t vpeid;
} ITEntry;

/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall or keep going.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,
    CMD_CONTINUE = 1,
} ItsCmdResult;

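/*
 * Return true if @id lies in the LPI INTID range: at or above
 * GICV3_LPI_INTID_START and within the number of interrupt IDs
 * implied by GICD_TYPER_IDBITS.
 */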
static inline bool intid_in_lpi_range(uint32_t id)
{
    return id >= GICV3_LPI_INTID_START &&
        id < (1 << (GICD_TYPER_IDBITS + 1));
}

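/*
 * Extract the physical table base address from a GITS_BASER<n> value.
 * For 64KB pages the address is split across the PHYADDRL_64K and
 * PHYADDRH_64K fields; for 4KB and 16KB pages it is a single field.
 */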
static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
    uint64_t result = 0;

    switch (page_sz) {
    case GITS_PAGE_SIZE_4K:
    case GITS_PAGE_SIZE_16K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
        break;

    case GITS_PAGE_SIZE_64K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
        result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
        break;

    default:
        break;
    }
    return result;
}

static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
                                 uint32_t idx, MemTxResult *res)
{
    /*
     * Given a TableDesc describing one of the ITS in-guest-memory
     * tables and an index into it, return the guest address
     * corresponding to that table entry.
     * If there was a memory error reading the L1 table of an
     * indirect table, *res is set accordingly, and we return -1.
     * If the L1 table entry is marked not valid, we return -1 with
     * *res set to MEMTX_OK.
     *
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t l2idx;
    uint64_t l2;
    uint32_t num_l2_entries;

    *res = MEMTX_OK;

    if (!td->indirect) {
        /* Single level table */
        return td->base_addr + idx * td->entry_sz;
    }

    /* Two level table */
    l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);

    l2 = address_space_ldq_le(as,
                              td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
                              MEMTXATTRS_UNSPECIFIED, res);
    if (*res != MEMTX_OK) {
        return -1;
    }
    if (!(l2 & L2_TABLE_VALID_MASK)) {
        return -1;
    }

    num_l2_entries = td->page_sz / td->entry_sz;
    return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}

/*
 * Read the Collection Table entry at index @icid. On success (including
 * successfully determining that there is no valid CTE for this index),
 * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    uint64_t cteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid CTE, or a memory error */
        cte->valid = false;
        goto out;
    }

    cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    cte->valid = FIELD_EX64(cteval, CTE, VALID);
    cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_cte_read_fault(icid);
    } else {
        trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
    }
    return res;
}

/*
 * Update the Interrupt Table entry at index @eventid in the table specified
 * by the DTE @dte. Returns true on success, false if there was a memory
 * access error.
 */
static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
                       const ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
    uint64_t itel = 0;
    uint32_t iteh = 0;

    trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
                              ite->inttype, ite->intid, ite->icid,
                              ite->vpeid, ite->doorbell);

    if (ite->valid) {
        itel = FIELD_DP64(itel, ITE_L, VALID, 1);
        itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
        itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
        itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
        itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
        iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
    }

    address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        return false;
    }
    address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

/*
 * Read the Interrupt Table entry at index @eventid from the table specified
 * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
 * struct @ite accordingly. If there is an error reading memory then we return
 * the error code.
 */
static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
                           const DTEntry *dte, ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t itel;
    uint32_t iteh;
    hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;

    itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    ite->valid = FIELD_EX64(itel, ITE_L, VALID);
    ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
    ite->intid = FIELD_EX64(itel, ITE_L, INTID);
    ite->icid = FIELD_EX64(itel, ITE_L, ICID);
    ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
    ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
    trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
                             ite->inttype, ite->intid, ite->icid,
                             ite->vpeid, ite->doorbell);
    return MEMTX_OK;
}

/*
 * Read the Device Table entry at index @devid. On success (including
 * successfully determining that there is no valid DTE for this index),
 * we return MEMTX_OK and populate the DTEntry struct accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
{
    MemTxResult res = MEMTX_OK;
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    uint64_t dteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid DTE, or a memory error */
        dte->valid = false;
        goto out;
    }
    dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    dte->valid = FIELD_EX64(dteval, DTE, VALID);
    dte->size = FIELD_EX64(dteval, DTE, SIZE);
    /* DTE word field stores bits [51:8] of the ITT address */
    dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_dte_read_fault(devid);
    } else {
        trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
    }
    return res;
}

/*
 * This function handles the processing of the following commands, based
 * on the ItsCmdType parameter passed:
 * 1. triggering of LPI interrupt translation via the ITS INT command
 * 2. triggering of LPI interrupt translation via a GITS_TRANSLATER write
 * 3. handling of the ITS CLEAR command
 * 4. handling of the ITS DISCARD command
 */
static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
                                       uint32_t eventid, ItsCmdType cmd)
{
    uint64_t num_eventids;
    DTEntry dte;
    CTEntry cte;
    ITEntry ite;

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", __func__, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (dte.size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    if (ite.icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
                      __func__, ite.icid);
        return CMD_CONTINUE;
    }

    if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid CTE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    /*
     * Current implementation only supports rdbase == procnum
     * Hence rdbase physical address is ignored
     */
    if (cte.rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }

    if ((cmd == CLEAR) || (cmd == DISCARD)) {
        gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
    } else {
        gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
    }

    if (cmd == DISCARD) {
        ITEntry ite = {};
        /* remove mapping from interrupt translation table */
        ite.valid = false;
        return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
    }
    return CMD_CONTINUE;
}

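/*
 * Handle the INT, CLEAR and DISCARD commands: decode the DeviceID and
 * EventID from the command packet and hand off to do_process_its_cmd().
 */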
static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
                                    ItsCmdType cmd)
{
    uint32_t devid, eventid;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    switch (cmd) {
    case INTERRUPT:
        trace_gicv3_its_cmd_int(devid, eventid);
        break;
    case CLEAR:
        trace_gicv3_its_cmd_clear(devid, eventid);
        break;
    case DISCARD:
        trace_gicv3_its_cmd_discard(devid, eventid);
        break;
    default:
        g_assert_not_reached();
    }
    return do_process_its_cmd(s, devid, eventid, cmd);
}

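/*
 * Handle the MAPTI and MAPI commands: map (DeviceID, EventID) to a
 * physical interrupt ID and a collection. For MAPI (ignore_pInt == true)
 * the EventID is also used as the physical interrupt ID.
 */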
static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
                                  bool ignore_pInt)
{
    uint32_t devid, eventid;
    uint32_t pIntid = 0;
    uint64_t num_eventids;
    uint16_t icid = 0;
    DTEntry dte;
    ITEntry ite;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    icid = cmdpkt[2] & ICID_MASK;

    if (ignore_pInt) {
        pIntid = eventid;
        trace_gicv3_its_cmd_mapi(devid, eventid, icid);
    } else {
        pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
        trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
    }

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    num_eventids = 1ULL << (dte.size + 1);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x >= 0x%x\n",
                      __func__, icid, s->ct.num_entries);
        return CMD_CONTINUE;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: no valid DTE for devid 0x%x\n", __func__, devid);
        return CMD_CONTINUE;
    }

    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (!intid_in_lpi_range(pIntid)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
        return CMD_CONTINUE;
    }

    /* add ite entry to interrupt translation table */
    ite.valid = true;
    ite.inttype = ITE_INTTYPE_PHYSICAL;
    ite.intid = pIntid;
    ite.icid = icid;
    ite.doorbell = INTID_SPURIOUS;
    ite.vpeid = 0;
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
}

/*
 * Update the Collection Table entry for @icid to @cte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t cteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);

    if (cte->valid) {
        /* add mapping entry to collection table */
        cteval = FIELD_DP64(cteval, CTE, VALID, 1);
        cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
    }

    entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }

    address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

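/*
 * Handle the MAPC command: map a collection (ICID) to a target
 * redistributor, which this implementation identifies by processor number.
 */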
static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint16_t icid;
    CTEntry cte;

    icid = cmdpkt[2] & ICID_MASK;
    cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
    if (cte.valid) {
        cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
        cte.rdbase &= RDBASE_PROCNUM_MASK;
    } else {
        cte.rdbase = 0;
    }
    trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
        return CMD_CONTINUE;
    }
    if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
        return CMD_CONTINUE;
    }

    return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
}

/*
 * Update the Device Table entry for @devid to @dte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t dteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);

    if (dte->valid) {
        /* add mapping entry to device table */
        dteval = FIELD_DP64(dteval, DTE, VALID, 1);
        dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
        dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
    }

    entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

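/*
 * Handle the MAPD command: map a DeviceID to the Interrupt Translation
 * Table specified by the ITT address and size fields.
 */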
static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid;
    DTEntry dte;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    dte.size = cmdpkt[1] & SIZE_MASK;
    dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
    dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;

    trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
                      devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid size %d\n", dte.size);
        return CMD_CONTINUE;
    }

    return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
}

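/*
 * Handle the MOVALL command: move all pending LPIs from one
 * redistributor to another.
 */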
static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint64_t rd1, rd2;

    rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
    rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);

    trace_gicv3_its_cmd_movall(rd1, rd2);

    if (rd1 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE1 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd1, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }
    if (rd2 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE2 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd2, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }

    if (rd1 == rd2) {
        /* Move to same target must succeed as a no-op */
        return CMD_CONTINUE;
    }

    /* Move all pending LPIs from redistributor 1 to redistributor 2 */
    gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);

    return CMD_CONTINUE;
}

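/*
 * Handle the MOVI command: retarget the LPI for (DeviceID, EventID)
 * from its current collection to a new ICID, moving any pending state
 * between redistributors if they differ.
 */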
static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    uint16_t new_icid;
    uint64_t num_eventids;
    DTEntry dte;
    CTEntry old_cte, new_cte;
    ITEntry old_ite;

    devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
    new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);

    trace_gicv3_its_cmd_movi(devid, eventid, new_icid);

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }
    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", __func__, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (dte.size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    if (old_ite.icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
                      __func__, old_ite.icid);
        return CMD_CONTINUE;
    }

    if (new_icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: ICID 0x%x\n",
                      __func__, new_icid);
        return CMD_CONTINUE;
    }

    if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!old_cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid CTE for old ICID 0x%x\n",
                      __func__, old_ite.icid);
        return CMD_CONTINUE;
    }

    if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!new_cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid CTE for new ICID 0x%x\n",
                      __func__, new_icid);
        return CMD_CONTINUE;
    }

    if (old_cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: CTE has invalid rdbase 0x%x\n",
                      __func__, old_cte.rdbase);
        return CMD_CONTINUE;
    }

    if (new_cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: CTE has invalid rdbase 0x%x\n",
                      __func__, new_cte.rdbase);
        return CMD_CONTINUE;
    }

    if (old_cte.rdbase != new_cte.rdbase) {
        /* Move the LPI from the old redistributor to the new one */
        gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
                             &s->gicv3->cpu[new_cte.rdbase],
                             old_ite.intid);
    }

    /* Update the ICID field in the interrupt translation table entry */
    old_ite.icid = new_icid;
    return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
}

/*
 * The current implementation blocks until all
 * commands are processed.
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    AddressSpace *as = &s->gicv3->dma_as;
    uint8_t cmd;
    int i;

    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE;
        void *hostmem;
        hwaddr buflen;
        uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);

        buflen = GITS_CMDQ_ENTRY_SIZE;
        hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
                                    &buflen, false, MEMTXATTRS_UNSPECIFIED);
        if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
            if (hostmem) {
                address_space_unmap(as, hostmem, buflen, false, 0);
            }
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }
        for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
            cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
        }
        address_space_unmap(as, hostmem, buflen, false, 0);

        cmd = cmdpkt[0] & CMD_MASK;

        trace_gicv3_its_process_command(rd_offset, cmd);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, cmdpkt, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, cmdpkt, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * The current implementation makes a blocking synchronous call
             * for every command issued earlier, so the internal state is
             * already consistent by the time the SYNC command is executed.
             * No further processing is required for SYNC.
             */
            trace_gicv3_its_cmd_sync();
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, cmdpkt);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cmdpkt);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, cmdpkt, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, cmdpkt, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, cmdpkt, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * The current implementation doesn't cache any ITS tables,
             * only the calculated LPI priority information. We just
             * need to trigger an LPI priority re-calculation to stay in
             * sync with LPI config table or pending table changes.
             */
            trace_gicv3_its_cmd_inv();
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        case GITS_CMD_MOVI:
            result = process_movi(s, cmdpkt);
            break;
        case GITS_CMD_MOVALL:
            result = process_movall(s, cmdpkt);
            break;
        default:
            trace_gicv3_its_cmd_unknown(cmd);
            break;
        }
        if (result == CMD_CONTINUE) {
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}

/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (such as base_addr, size, etc.) from the GITS_BASER registers.
 * It is called when the ITS is enabled and also during post_load migration.
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         * The specification makes it UNPREDICTABLE to enable the ITS without
         * marking each BASER<n> as valid. We choose to handle these as if
         * the table was zero-sized, so commands using the table will fail
         * and interrupts requested via GITS_TRANSLATER writes will be ignored.
         * This happens automatically by leaving the num_entries field at
         * zero, which will be caught by the bounds checks we have before
         * every table lookup anyway.
         */
        if (!FIELD_EX64(value, GITS_BASER, VALID)) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            td->num_entries = (((num_pages * page_sz) /
                                  L1TABLE_ENTRY_SIZE) *
                                 (page_sz / td->entry_sz));
        }
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}

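/*
 * Extract the command queue base address and size from GITS_CBASER.
 * Like extract_table_params(), this is called when the ITS is enabled
 * and during post_load migration.
 */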
static void extract_cmdq_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint64_t value = s->cbaser;

    num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;

    memset(&s->cq, 0, sizeof(s->cq));

    if (FIELD_EX64(value, GITS_CBASER, VALID)) {
        s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
                             GITS_CMDQ_ENTRY_SIZE;
        s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
        s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
    }
}

static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
                                              uint64_t *data, unsigned size,
                                              MemTxAttrs attrs)
{
    /*
     * GITS_TRANSLATER is write-only, and all other addresses
     * in the interrupt translation space frame are RES0.
     */
    *data = 0;
    return MEMTX_OK;
}

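/*
 * A write to GITS_TRANSLATER triggers an LPI translation for the
 * (DeviceID, EventID) pair, where the DeviceID is the requester ID of
 * the write transaction and the EventID is the 32-bit value written.
 */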
static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;

    trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
            result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}

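/*
 * Handle a 32-bit write to the ITS control frame. Returns false for
 * offsets that do not decode to a register, so the caller can log the
 * access and ignore the write (reserved registers are RAZ/WI).
 */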
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(offset - GITS_IDREGS, GICV3_PIDR0_ITS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        *data = s->baser[index];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_readl(s, offset, data, attrs);
        break;
    case 8:
        result = its_readll(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badread(offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        *data = 0;
    } else {
        trace_gicv3_its_read(offset, *data, size);
    }
    return MEMTX_OK;
}

static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badwrite(offset, data, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    } else {
        trace_gicv3_its_write(offset, data, size);
    }
    return MEMTX_OK;
}

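/*
 * MemoryRegionOps for the two ITS register frames: the control frame
 * (GITS_CTLR, GITS_BASER<n>, the command queue registers, etc) and the
 * translation frame, which contains only GITS_TRANSLATER.
 */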
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}

static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type, where n = 2 to 7, is 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}

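/*
 * After migration, re-derive the cached table and command queue
 * parameters if the ITS was enabled at the time state was saved.
 */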
static void gicv3_its_post_load(GICv3ITSState *s)
{
    if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
        extract_table_params(s);
        extract_cmdq_params(s);
    }
}

static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};

static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}

static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};

static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)