2 * ITS emulation for a GICv3-based system
4 * Copyright Linaro.org 2021
7 * Shashi Mallela <shashi.mallela@linaro.org>
9 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10 * option) any later version. See the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
23 typedef struct GICv3ITSClass GICv3ITSClass
;
24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
25 DECLARE_OBJ_CHECKERS(GICv3ITSState
, GICv3ITSClass
,
26 ARM_GICV3_ITS
, TYPE_ARM_GICV3_ITS
)
28 struct GICv3ITSClass
{
29 GICv3ITSCommonClass parent_class
;
30 void (*parent_reset
)(DeviceState
*dev
);
/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;
/*
 * In-memory table entry structures, decoded from / encoded to the
 * guest-memory table formats. Field set is exactly what get_*() reads
 * and update_*() writes below.
 * NOTE(review): exact field widths reconstructed from usage — confirm
 * against the original header if available.
 */
typedef struct DTEntry {
    bool valid;
    unsigned size;     /* number of eventid bits minus one */
    uint64_t ittaddr;  /* ITT base address (full address, already shifted) */
} DTEntry;

typedef struct CTEntry {
    bool valid;
    uint32_t rdbase;   /* target redistributor (processor number) */
} CTEntry;

typedef struct ITEntry {
    bool valid;
    int inttype;       /* ITE_INTTYPE_PHYSICAL or virtual */
    uint32_t intid;
    uint32_t doorbell;
    uint32_t icid;
    uint32_t vpeid;
} ITEntry;
/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall or keep going.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,
    CMD_CONTINUE = 1,
} ItsCmdResult;
82 static uint64_t baser_base_addr(uint64_t value
, uint32_t page_sz
)
87 case GITS_PAGE_SIZE_4K
:
88 case GITS_PAGE_SIZE_16K
:
89 result
= FIELD_EX64(value
, GITS_BASER
, PHYADDR
) << 12;
92 case GITS_PAGE_SIZE_64K
:
93 result
= FIELD_EX64(value
, GITS_BASER
, PHYADDRL_64K
) << 16;
94 result
|= FIELD_EX64(value
, GITS_BASER
, PHYADDRH_64K
) << 48;
103 static uint64_t table_entry_addr(GICv3ITSState
*s
, TableDesc
*td
,
104 uint32_t idx
, MemTxResult
*res
)
107 * Given a TableDesc describing one of the ITS in-guest-memory
108 * tables and an index into it, return the guest address
109 * corresponding to that table entry.
110 * If there was a memory error reading the L1 table of an
111 * indirect table, *res is set accordingly, and we return -1.
112 * If the L1 table entry is marked not valid, we return -1 with
113 * *res set to MEMTX_OK.
115 * The specification defines the format of level 1 entries of a
116 * 2-level table, but the format of level 2 entries and the format
117 * of flat-mapped tables is IMPDEF.
119 AddressSpace
*as
= &s
->gicv3
->dma_as
;
122 uint32_t num_l2_entries
;
127 /* Single level table */
128 return td
->base_addr
+ idx
* td
->entry_sz
;
131 /* Two level table */
132 l2idx
= idx
/ (td
->page_sz
/ L1TABLE_ENTRY_SIZE
);
134 l2
= address_space_ldq_le(as
,
135 td
->base_addr
+ (l2idx
* L1TABLE_ENTRY_SIZE
),
136 MEMTXATTRS_UNSPECIFIED
, res
);
137 if (*res
!= MEMTX_OK
) {
140 if (!(l2
& L2_TABLE_VALID_MASK
)) {
144 num_l2_entries
= td
->page_sz
/ td
->entry_sz
;
145 return (l2
& ((1ULL << 51) - 1)) + (idx
% num_l2_entries
) * td
->entry_sz
;
149 * Read the Collection Table entry at index @icid. On success (including
150 * successfully determining that there is no valid CTE for this index),
151 * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
152 * If there is an error reading memory then we return the error code.
154 static MemTxResult
get_cte(GICv3ITSState
*s
, uint16_t icid
, CTEntry
*cte
)
156 AddressSpace
*as
= &s
->gicv3
->dma_as
;
157 MemTxResult res
= MEMTX_OK
;
158 uint64_t entry_addr
= table_entry_addr(s
, &s
->ct
, icid
, &res
);
161 if (entry_addr
== -1) {
162 /* No L2 table entry, i.e. no valid CTE, or a memory error */
167 cteval
= address_space_ldq_le(as
, entry_addr
, MEMTXATTRS_UNSPECIFIED
, &res
);
168 if (res
!= MEMTX_OK
) {
171 cte
->valid
= FIELD_EX64(cteval
, CTE
, VALID
);
172 cte
->rdbase
= FIELD_EX64(cteval
, CTE
, RDBASE
);
177 * Update the Interrupt Table entry at index @evinted in the table specified
178 * by the dte @dte. Returns true on success, false if there was a memory
181 static bool update_ite(GICv3ITSState
*s
, uint32_t eventid
, const DTEntry
*dte
,
184 AddressSpace
*as
= &s
->gicv3
->dma_as
;
185 MemTxResult res
= MEMTX_OK
;
186 hwaddr iteaddr
= dte
->ittaddr
+ eventid
* ITS_ITT_ENTRY_SIZE
;
191 itel
= FIELD_DP64(itel
, ITE_L
, VALID
, 1);
192 itel
= FIELD_DP64(itel
, ITE_L
, INTTYPE
, ite
->inttype
);
193 itel
= FIELD_DP64(itel
, ITE_L
, INTID
, ite
->intid
);
194 itel
= FIELD_DP64(itel
, ITE_L
, ICID
, ite
->icid
);
195 itel
= FIELD_DP64(itel
, ITE_L
, VPEID
, ite
->vpeid
);
196 iteh
= FIELD_DP32(iteh
, ITE_H
, DOORBELL
, ite
->doorbell
);
199 address_space_stq_le(as
, iteaddr
, itel
, MEMTXATTRS_UNSPECIFIED
, &res
);
200 if (res
!= MEMTX_OK
) {
203 address_space_stl_le(as
, iteaddr
+ 8, iteh
, MEMTXATTRS_UNSPECIFIED
, &res
);
204 return res
== MEMTX_OK
;
208 * Read the Interrupt Table entry at index @eventid from the table specified
209 * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
210 * struct @ite accordingly. If there is an error reading memory then we return
213 static MemTxResult
get_ite(GICv3ITSState
*s
, uint32_t eventid
,
214 const DTEntry
*dte
, ITEntry
*ite
)
216 AddressSpace
*as
= &s
->gicv3
->dma_as
;
217 MemTxResult res
= MEMTX_OK
;
220 hwaddr iteaddr
= dte
->ittaddr
+ eventid
* ITS_ITT_ENTRY_SIZE
;
222 itel
= address_space_ldq_le(as
, iteaddr
, MEMTXATTRS_UNSPECIFIED
, &res
);
223 if (res
!= MEMTX_OK
) {
227 iteh
= address_space_ldl_le(as
, iteaddr
+ 8, MEMTXATTRS_UNSPECIFIED
, &res
);
228 if (res
!= MEMTX_OK
) {
232 ite
->valid
= FIELD_EX64(itel
, ITE_L
, VALID
);
233 ite
->inttype
= FIELD_EX64(itel
, ITE_L
, INTTYPE
);
234 ite
->intid
= FIELD_EX64(itel
, ITE_L
, INTID
);
235 ite
->icid
= FIELD_EX64(itel
, ITE_L
, ICID
);
236 ite
->vpeid
= FIELD_EX64(itel
, ITE_L
, VPEID
);
237 ite
->doorbell
= FIELD_EX64(iteh
, ITE_H
, DOORBELL
);
242 * Read the Device Table entry at index @devid. On success (including
243 * successfully determining that there is no valid DTE for this index),
244 * we return MEMTX_OK and populate the DTEntry struct accordingly.
245 * If there is an error reading memory then we return the error code.
247 static MemTxResult
get_dte(GICv3ITSState
*s
, uint32_t devid
, DTEntry
*dte
)
249 MemTxResult res
= MEMTX_OK
;
250 AddressSpace
*as
= &s
->gicv3
->dma_as
;
251 uint64_t entry_addr
= table_entry_addr(s
, &s
->dt
, devid
, &res
);
254 if (entry_addr
== -1) {
255 /* No L2 table entry, i.e. no valid DTE, or a memory error */
259 dteval
= address_space_ldq_le(as
, entry_addr
, MEMTXATTRS_UNSPECIFIED
, &res
);
260 if (res
!= MEMTX_OK
) {
263 dte
->valid
= FIELD_EX64(dteval
, DTE
, VALID
);
264 dte
->size
= FIELD_EX64(dteval
, DTE
, SIZE
);
265 /* DTE word field stores bits [51:8] of the ITT address */
266 dte
->ittaddr
= FIELD_EX64(dteval
, DTE
, ITTADDR
) << ITTADDR_SHIFT
;
271 * This function handles the processing of following commands based on
272 * the ItsCmdType parameter passed:-
273 * 1. triggering of lpi interrupt translation via ITS INT command
274 * 2. triggering of lpi interrupt translation via gits_translater register
275 * 3. handling of ITS CLEAR command
276 * 4. handling of ITS DISCARD command
278 static ItsCmdResult
do_process_its_cmd(GICv3ITSState
*s
, uint32_t devid
,
279 uint32_t eventid
, ItsCmdType cmd
)
281 uint64_t num_eventids
;
286 if (devid
>= s
->dt
.num_entries
) {
287 qemu_log_mask(LOG_GUEST_ERROR
,
288 "%s: invalid command attributes: devid %d>=%d",
289 __func__
, devid
, s
->dt
.num_entries
);
293 if (get_dte(s
, devid
, &dte
) != MEMTX_OK
) {
297 qemu_log_mask(LOG_GUEST_ERROR
,
298 "%s: invalid command attributes: "
299 "invalid dte for %d\n", __func__
, devid
);
303 num_eventids
= 1ULL << (dte
.size
+ 1);
304 if (eventid
>= num_eventids
) {
305 qemu_log_mask(LOG_GUEST_ERROR
,
306 "%s: invalid command attributes: eventid %d >= %"
308 __func__
, eventid
, num_eventids
);
312 if (get_ite(s
, eventid
, &dte
, &ite
) != MEMTX_OK
) {
316 if (!ite
.valid
|| ite
.inttype
!= ITE_INTTYPE_PHYSICAL
) {
317 qemu_log_mask(LOG_GUEST_ERROR
,
318 "%s: invalid command attributes: invalid ITE\n",
323 if (ite
.icid
>= s
->ct
.num_entries
) {
324 qemu_log_mask(LOG_GUEST_ERROR
,
325 "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
330 if (get_cte(s
, ite
.icid
, &cte
) != MEMTX_OK
) {
334 qemu_log_mask(LOG_GUEST_ERROR
,
335 "%s: invalid command attributes: invalid CTE\n",
341 * Current implementation only supports rdbase == procnum
342 * Hence rdbase physical address is ignored
344 if (cte
.rdbase
>= s
->gicv3
->num_cpu
) {
348 if ((cmd
== CLEAR
) || (cmd
== DISCARD
)) {
349 gicv3_redist_process_lpi(&s
->gicv3
->cpu
[cte
.rdbase
], ite
.intid
, 0);
351 gicv3_redist_process_lpi(&s
->gicv3
->cpu
[cte
.rdbase
], ite
.intid
, 1);
354 if (cmd
== DISCARD
) {
356 /* remove mapping from interrupt translation table */
358 return update_ite(s
, eventid
, &dte
, &ite
) ? CMD_CONTINUE
: CMD_STALL
;
362 static ItsCmdResult
process_its_cmd(GICv3ITSState
*s
, const uint64_t *cmdpkt
,
365 uint32_t devid
, eventid
;
367 devid
= (cmdpkt
[0] & DEVID_MASK
) >> DEVID_SHIFT
;
368 eventid
= cmdpkt
[1] & EVENTID_MASK
;
369 return do_process_its_cmd(s
, devid
, eventid
, cmd
);
372 static ItsCmdResult
process_mapti(GICv3ITSState
*s
, const uint64_t *cmdpkt
,
375 uint32_t devid
, eventid
;
377 uint64_t num_eventids
;
383 devid
= (cmdpkt
[0] & DEVID_MASK
) >> DEVID_SHIFT
;
384 eventid
= cmdpkt
[1] & EVENTID_MASK
;
389 pIntid
= (cmdpkt
[1] & pINTID_MASK
) >> pINTID_SHIFT
;
392 icid
= cmdpkt
[2] & ICID_MASK
;
394 if (devid
>= s
->dt
.num_entries
) {
395 qemu_log_mask(LOG_GUEST_ERROR
,
396 "%s: invalid command attributes: devid %d>=%d",
397 __func__
, devid
, s
->dt
.num_entries
);
401 if (get_dte(s
, devid
, &dte
) != MEMTX_OK
) {
404 num_eventids
= 1ULL << (dte
.size
+ 1);
405 num_intids
= 1ULL << (GICD_TYPER_IDBITS
+ 1);
407 if (icid
>= s
->ct
.num_entries
) {
408 qemu_log_mask(LOG_GUEST_ERROR
,
409 "%s: invalid ICID 0x%x >= 0x%x\n",
410 __func__
, icid
, s
->ct
.num_entries
);
415 qemu_log_mask(LOG_GUEST_ERROR
,
416 "%s: no valid DTE for devid 0x%x\n", __func__
, devid
);
420 if (eventid
>= num_eventids
) {
421 qemu_log_mask(LOG_GUEST_ERROR
,
422 "%s: invalid event ID 0x%x >= 0x%" PRIx64
"\n",
423 __func__
, eventid
, num_eventids
);
427 if (pIntid
< GICV3_LPI_INTID_START
|| pIntid
>= num_intids
) {
428 qemu_log_mask(LOG_GUEST_ERROR
,
429 "%s: invalid interrupt ID 0x%x\n", __func__
, pIntid
);
433 /* add ite entry to interrupt translation table */
435 ite
.inttype
= ITE_INTTYPE_PHYSICAL
;
438 ite
.doorbell
= INTID_SPURIOUS
;
440 return update_ite(s
, eventid
, &dte
, &ite
) ? CMD_CONTINUE
: CMD_STALL
;
444 * Update the Collection Table entry for @icid to @cte. Returns true
445 * on success, false if there was a memory access error.
447 static bool update_cte(GICv3ITSState
*s
, uint16_t icid
, const CTEntry
*cte
)
449 AddressSpace
*as
= &s
->gicv3
->dma_as
;
452 MemTxResult res
= MEMTX_OK
;
455 /* add mapping entry to collection table */
456 cteval
= FIELD_DP64(cteval
, CTE
, VALID
, 1);
457 cteval
= FIELD_DP64(cteval
, CTE
, RDBASE
, cte
->rdbase
);
460 entry_addr
= table_entry_addr(s
, &s
->ct
, icid
, &res
);
461 if (res
!= MEMTX_OK
) {
462 /* memory access error: stall */
465 if (entry_addr
== -1) {
466 /* No L2 table for this index: discard write and continue */
470 address_space_stq_le(as
, entry_addr
, cteval
, MEMTXATTRS_UNSPECIFIED
, &res
);
471 return res
== MEMTX_OK
;
474 static ItsCmdResult
process_mapc(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
479 icid
= cmdpkt
[2] & ICID_MASK
;
480 cte
.valid
= cmdpkt
[2] & CMD_FIELD_VALID_MASK
;
482 cte
.rdbase
= (cmdpkt
[2] & R_MAPC_RDBASE_MASK
) >> R_MAPC_RDBASE_SHIFT
;
483 cte
.rdbase
&= RDBASE_PROCNUM_MASK
;
488 if (icid
>= s
->ct
.num_entries
) {
489 qemu_log_mask(LOG_GUEST_ERROR
, "ITS MAPC: invalid ICID 0x%d", icid
);
492 if (cte
.valid
&& cte
.rdbase
>= s
->gicv3
->num_cpu
) {
493 qemu_log_mask(LOG_GUEST_ERROR
,
494 "ITS MAPC: invalid RDBASE %u ", cte
.rdbase
);
498 return update_cte(s
, icid
, &cte
) ? CMD_CONTINUE
: CMD_STALL
;
502 * Update the Device Table entry for @devid to @dte. Returns true
503 * on success, false if there was a memory access error.
505 static bool update_dte(GICv3ITSState
*s
, uint32_t devid
, const DTEntry
*dte
)
507 AddressSpace
*as
= &s
->gicv3
->dma_as
;
510 MemTxResult res
= MEMTX_OK
;
513 /* add mapping entry to device table */
514 dteval
= FIELD_DP64(dteval
, DTE
, VALID
, 1);
515 dteval
= FIELD_DP64(dteval
, DTE
, SIZE
, dte
->size
);
516 dteval
= FIELD_DP64(dteval
, DTE
, ITTADDR
, dte
->ittaddr
);
519 entry_addr
= table_entry_addr(s
, &s
->dt
, devid
, &res
);
520 if (res
!= MEMTX_OK
) {
521 /* memory access error: stall */
524 if (entry_addr
== -1) {
525 /* No L2 table for this index: discard write and continue */
528 address_space_stq_le(as
, entry_addr
, dteval
, MEMTXATTRS_UNSPECIFIED
, &res
);
529 return res
== MEMTX_OK
;
532 static ItsCmdResult
process_mapd(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
537 devid
= (cmdpkt
[0] & DEVID_MASK
) >> DEVID_SHIFT
;
538 dte
.size
= cmdpkt
[1] & SIZE_MASK
;
539 dte
.ittaddr
= (cmdpkt
[2] & ITTADDR_MASK
) >> ITTADDR_SHIFT
;
540 dte
.valid
= cmdpkt
[2] & CMD_FIELD_VALID_MASK
;
542 if (devid
>= s
->dt
.num_entries
) {
543 qemu_log_mask(LOG_GUEST_ERROR
,
544 "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
545 devid
, s
->dt
.num_entries
);
549 if (dte
.size
> FIELD_EX64(s
->typer
, GITS_TYPER
, IDBITS
)) {
550 qemu_log_mask(LOG_GUEST_ERROR
,
551 "ITS MAPD: invalid size %d\n", dte
.size
);
555 return update_dte(s
, devid
, &dte
) ? CMD_CONTINUE
: CMD_STALL
;
558 static ItsCmdResult
process_movall(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
562 rd1
= FIELD_EX64(cmdpkt
[2], MOVALL_2
, RDBASE1
);
563 rd2
= FIELD_EX64(cmdpkt
[3], MOVALL_3
, RDBASE2
);
565 if (rd1
>= s
->gicv3
->num_cpu
) {
566 qemu_log_mask(LOG_GUEST_ERROR
,
567 "%s: RDBASE1 %" PRId64
568 " out of range (must be less than %d)\n",
569 __func__
, rd1
, s
->gicv3
->num_cpu
);
572 if (rd2
>= s
->gicv3
->num_cpu
) {
573 qemu_log_mask(LOG_GUEST_ERROR
,
574 "%s: RDBASE2 %" PRId64
575 " out of range (must be less than %d)\n",
576 __func__
, rd2
, s
->gicv3
->num_cpu
);
581 /* Move to same target must succeed as a no-op */
585 /* Move all pending LPIs from redistributor 1 to redistributor 2 */
586 gicv3_redist_movall_lpis(&s
->gicv3
->cpu
[rd1
], &s
->gicv3
->cpu
[rd2
]);
591 static ItsCmdResult
process_movi(GICv3ITSState
*s
, const uint64_t *cmdpkt
)
593 uint32_t devid
, eventid
;
595 uint64_t num_eventids
;
597 CTEntry old_cte
, new_cte
;
600 devid
= FIELD_EX64(cmdpkt
[0], MOVI_0
, DEVICEID
);
601 eventid
= FIELD_EX64(cmdpkt
[1], MOVI_1
, EVENTID
);
602 new_icid
= FIELD_EX64(cmdpkt
[2], MOVI_2
, ICID
);
604 if (devid
>= s
->dt
.num_entries
) {
605 qemu_log_mask(LOG_GUEST_ERROR
,
606 "%s: invalid command attributes: devid %d>=%d",
607 __func__
, devid
, s
->dt
.num_entries
);
610 if (get_dte(s
, devid
, &dte
) != MEMTX_OK
) {
615 qemu_log_mask(LOG_GUEST_ERROR
,
616 "%s: invalid command attributes: "
617 "invalid dte for %d\n", __func__
, devid
);
621 num_eventids
= 1ULL << (dte
.size
+ 1);
622 if (eventid
>= num_eventids
) {
623 qemu_log_mask(LOG_GUEST_ERROR
,
624 "%s: invalid command attributes: eventid %d >= %"
626 __func__
, eventid
, num_eventids
);
630 if (get_ite(s
, eventid
, &dte
, &old_ite
) != MEMTX_OK
) {
634 if (!old_ite
.valid
|| old_ite
.inttype
!= ITE_INTTYPE_PHYSICAL
) {
635 qemu_log_mask(LOG_GUEST_ERROR
,
636 "%s: invalid command attributes: invalid ITE\n",
641 if (old_ite
.icid
>= s
->ct
.num_entries
) {
642 qemu_log_mask(LOG_GUEST_ERROR
,
643 "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
644 __func__
, old_ite
.icid
);
648 if (new_icid
>= s
->ct
.num_entries
) {
649 qemu_log_mask(LOG_GUEST_ERROR
,
650 "%s: invalid command attributes: ICID 0x%x\n",
655 if (get_cte(s
, old_ite
.icid
, &old_cte
) != MEMTX_OK
) {
658 if (!old_cte
.valid
) {
659 qemu_log_mask(LOG_GUEST_ERROR
,
660 "%s: invalid command attributes: "
661 "invalid CTE for old ICID 0x%x\n",
662 __func__
, old_ite
.icid
);
666 if (get_cte(s
, new_icid
, &new_cte
) != MEMTX_OK
) {
669 if (!new_cte
.valid
) {
670 qemu_log_mask(LOG_GUEST_ERROR
,
671 "%s: invalid command attributes: "
672 "invalid CTE for new ICID 0x%x\n",
677 if (old_cte
.rdbase
>= s
->gicv3
->num_cpu
) {
678 qemu_log_mask(LOG_GUEST_ERROR
,
679 "%s: CTE has invalid rdbase 0x%x\n",
680 __func__
, old_cte
.rdbase
);
684 if (new_cte
.rdbase
>= s
->gicv3
->num_cpu
) {
685 qemu_log_mask(LOG_GUEST_ERROR
,
686 "%s: CTE has invalid rdbase 0x%x\n",
687 __func__
, new_cte
.rdbase
);
691 if (old_cte
.rdbase
!= new_cte
.rdbase
) {
692 /* Move the LPI from the old redistributor to the new one */
693 gicv3_redist_mov_lpi(&s
->gicv3
->cpu
[old_cte
.rdbase
],
694 &s
->gicv3
->cpu
[new_cte
.rdbase
],
698 /* Update the ICID field in the interrupt translation table entry */
699 old_ite
.icid
= new_icid
;
700 return update_ite(s
, eventid
, &dte
, &old_ite
) ? CMD_CONTINUE
: CMD_STALL
;
704 * Current implementation blocks until all
705 * commands are processed
707 static void process_cmdq(GICv3ITSState
*s
)
709 uint32_t wr_offset
= 0;
710 uint32_t rd_offset
= 0;
711 uint32_t cq_offset
= 0;
712 AddressSpace
*as
= &s
->gicv3
->dma_as
;
716 if (!(s
->ctlr
& R_GITS_CTLR_ENABLED_MASK
)) {
720 wr_offset
= FIELD_EX64(s
->cwriter
, GITS_CWRITER
, OFFSET
);
722 if (wr_offset
>= s
->cq
.num_entries
) {
723 qemu_log_mask(LOG_GUEST_ERROR
,
724 "%s: invalid write offset "
725 "%d\n", __func__
, wr_offset
);
729 rd_offset
= FIELD_EX64(s
->creadr
, GITS_CREADR
, OFFSET
);
731 if (rd_offset
>= s
->cq
.num_entries
) {
732 qemu_log_mask(LOG_GUEST_ERROR
,
733 "%s: invalid read offset "
734 "%d\n", __func__
, rd_offset
);
738 while (wr_offset
!= rd_offset
) {
739 ItsCmdResult result
= CMD_CONTINUE
;
742 uint64_t cmdpkt
[GITS_CMDQ_ENTRY_WORDS
];
744 cq_offset
= (rd_offset
* GITS_CMDQ_ENTRY_SIZE
);
746 buflen
= GITS_CMDQ_ENTRY_SIZE
;
747 hostmem
= address_space_map(as
, s
->cq
.base_addr
+ cq_offset
,
748 &buflen
, false, MEMTXATTRS_UNSPECIFIED
);
749 if (!hostmem
|| buflen
!= GITS_CMDQ_ENTRY_SIZE
) {
751 address_space_unmap(as
, hostmem
, buflen
, false, 0);
753 s
->creadr
= FIELD_DP64(s
->creadr
, GITS_CREADR
, STALLED
, 1);
754 qemu_log_mask(LOG_GUEST_ERROR
,
755 "%s: could not read command at 0x%" PRIx64
"\n",
756 __func__
, s
->cq
.base_addr
+ cq_offset
);
759 for (i
= 0; i
< ARRAY_SIZE(cmdpkt
); i
++) {
760 cmdpkt
[i
] = ldq_le_p(hostmem
+ i
* sizeof(uint64_t));
762 address_space_unmap(as
, hostmem
, buflen
, false, 0);
764 cmd
= cmdpkt
[0] & CMD_MASK
;
766 trace_gicv3_its_process_command(rd_offset
, cmd
);
770 result
= process_its_cmd(s
, cmdpkt
, INTERRUPT
);
773 result
= process_its_cmd(s
, cmdpkt
, CLEAR
);
777 * Current implementation makes a blocking synchronous call
778 * for every command issued earlier, hence the internal state
779 * is already consistent by the time SYNC command is executed.
780 * Hence no further processing is required for SYNC command.
784 result
= process_mapd(s
, cmdpkt
);
787 result
= process_mapc(s
, cmdpkt
);
790 result
= process_mapti(s
, cmdpkt
, false);
793 result
= process_mapti(s
, cmdpkt
, true);
795 case GITS_CMD_DISCARD
:
796 result
= process_its_cmd(s
, cmdpkt
, DISCARD
);
799 case GITS_CMD_INVALL
:
801 * Current implementation doesn't cache any ITS tables,
802 * but the calculated lpi priority information. We only
803 * need to trigger lpi priority re-calculation to be in
804 * sync with LPI config table or pending table changes.
806 for (i
= 0; i
< s
->gicv3
->num_cpu
; i
++) {
807 gicv3_redist_update_lpi(&s
->gicv3
->cpu
[i
]);
811 result
= process_movi(s
, cmdpkt
);
813 case GITS_CMD_MOVALL
:
814 result
= process_movall(s
, cmdpkt
);
819 if (result
== CMD_CONTINUE
) {
821 rd_offset
%= s
->cq
.num_entries
;
822 s
->creadr
= FIELD_DP64(s
->creadr
, GITS_CREADR
, OFFSET
, rd_offset
);
825 s
->creadr
= FIELD_DP64(s
->creadr
, GITS_CREADR
, STALLED
, 1);
826 qemu_log_mask(LOG_GUEST_ERROR
,
827 "%s: 0x%x cmd processing failed, stalling\n",
835 * This function extracts the ITS Device and Collection table specific
836 * parameters (like base_addr, size etc) from GITS_BASER register.
837 * It is called during ITS enable and also during post_load migration
839 static void extract_table_params(GICv3ITSState
*s
)
841 uint16_t num_pages
= 0;
842 uint8_t page_sz_type
;
844 uint32_t page_sz
= 0;
847 for (int i
= 0; i
< 8; i
++) {
857 page_sz_type
= FIELD_EX64(value
, GITS_BASER
, PAGESIZE
);
859 switch (page_sz_type
) {
861 page_sz
= GITS_PAGE_SIZE_4K
;
865 page_sz
= GITS_PAGE_SIZE_16K
;
870 page_sz
= GITS_PAGE_SIZE_64K
;
874 g_assert_not_reached();
877 num_pages
= FIELD_EX64(value
, GITS_BASER
, SIZE
) + 1;
879 type
= FIELD_EX64(value
, GITS_BASER
, TYPE
);
882 case GITS_BASER_TYPE_DEVICE
:
884 idbits
= FIELD_EX64(s
->typer
, GITS_TYPER
, DEVBITS
) + 1;
886 case GITS_BASER_TYPE_COLLECTION
:
888 if (FIELD_EX64(s
->typer
, GITS_TYPER
, CIL
)) {
889 idbits
= FIELD_EX64(s
->typer
, GITS_TYPER
, CIDBITS
) + 1;
891 /* 16-bit CollectionId supported when CIL == 0 */
897 * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
898 * ensures we will only see type values corresponding to
899 * the values set up in gicv3_its_reset().
901 g_assert_not_reached();
904 memset(td
, 0, sizeof(*td
));
906 * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
907 * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
908 * do not have a special case where the GITS_BASER<n>.Valid bit is 0
909 * for the register corresponding to the Collection table but we
910 * still have to process interrupts using non-memory-backed
911 * Collection table entries.)
912 * The specification makes it UNPREDICTABLE to enable the ITS without
913 * marking each BASER<n> as valid. We choose to handle these as if
914 * the table was zero-sized, so commands using the table will fail
915 * and interrupts requested via GITS_TRANSLATER writes will be ignored.
916 * This happens automatically by leaving the num_entries field at
917 * zero, which will be caught by the bounds checks we have before
918 * every table lookup anyway.
920 if (!FIELD_EX64(value
, GITS_BASER
, VALID
)) {
923 td
->page_sz
= page_sz
;
924 td
->indirect
= FIELD_EX64(value
, GITS_BASER
, INDIRECT
);
925 td
->entry_sz
= FIELD_EX64(value
, GITS_BASER
, ENTRYSIZE
) + 1;
926 td
->base_addr
= baser_base_addr(value
, page_sz
);
928 td
->num_entries
= (num_pages
* page_sz
) / td
->entry_sz
;
930 td
->num_entries
= (((num_pages
* page_sz
) /
931 L1TABLE_ENTRY_SIZE
) *
932 (page_sz
/ td
->entry_sz
));
934 td
->num_entries
= MIN(td
->num_entries
, 1ULL << idbits
);
938 static void extract_cmdq_params(GICv3ITSState
*s
)
940 uint16_t num_pages
= 0;
941 uint64_t value
= s
->cbaser
;
943 num_pages
= FIELD_EX64(value
, GITS_CBASER
, SIZE
) + 1;
945 memset(&s
->cq
, 0 , sizeof(s
->cq
));
947 if (FIELD_EX64(value
, GITS_CBASER
, VALID
)) {
948 s
->cq
.num_entries
= (num_pages
* GITS_PAGE_SIZE_4K
) /
949 GITS_CMDQ_ENTRY_SIZE
;
950 s
->cq
.base_addr
= FIELD_EX64(value
, GITS_CBASER
, PHYADDR
);
951 s
->cq
.base_addr
<<= R_GITS_CBASER_PHYADDR_SHIFT
;
955 static MemTxResult
gicv3_its_translation_read(void *opaque
, hwaddr offset
,
956 uint64_t *data
, unsigned size
,
960 * GITS_TRANSLATER is write-only, and all other addresses
961 * in the interrupt translation space frame are RES0.
967 static MemTxResult
gicv3_its_translation_write(void *opaque
, hwaddr offset
,
968 uint64_t data
, unsigned size
,
971 GICv3ITSState
*s
= (GICv3ITSState
*)opaque
;
974 trace_gicv3_its_translation_write(offset
, data
, size
, attrs
.requester_id
);
977 case GITS_TRANSLATER
:
978 if (s
->ctlr
& R_GITS_CTLR_ENABLED_MASK
) {
979 result
= do_process_its_cmd(s
, attrs
.requester_id
, data
, NONE
);
993 static bool its_writel(GICv3ITSState
*s
, hwaddr offset
,
994 uint64_t value
, MemTxAttrs attrs
)
1001 if (value
& R_GITS_CTLR_ENABLED_MASK
) {
1002 s
->ctlr
|= R_GITS_CTLR_ENABLED_MASK
;
1003 extract_table_params(s
);
1004 extract_cmdq_params(s
);
1007 s
->ctlr
&= ~R_GITS_CTLR_ENABLED_MASK
;
1012 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1015 if (!(s
->ctlr
& R_GITS_CTLR_ENABLED_MASK
)) {
1016 s
->cbaser
= deposit64(s
->cbaser
, 0, 32, value
);
1020 case GITS_CBASER
+ 4:
1022 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1025 if (!(s
->ctlr
& R_GITS_CTLR_ENABLED_MASK
)) {
1026 s
->cbaser
= deposit64(s
->cbaser
, 32, 32, value
);
1031 s
->cwriter
= deposit64(s
->cwriter
, 0, 32,
1032 (value
& ~R_GITS_CWRITER_RETRY_MASK
));
1033 if (s
->cwriter
!= s
->creadr
) {
1037 case GITS_CWRITER
+ 4:
1038 s
->cwriter
= deposit64(s
->cwriter
, 32, 32, value
);
1041 if (s
->gicv3
->gicd_ctlr
& GICD_CTLR_DS
) {
1042 s
->creadr
= deposit64(s
->creadr
, 0, 32,
1043 (value
& ~R_GITS_CREADR_STALLED_MASK
));
1045 /* RO register, ignore the write */
1046 qemu_log_mask(LOG_GUEST_ERROR
,
1047 "%s: invalid guest write to RO register at offset "
1048 TARGET_FMT_plx
"\n", __func__
, offset
);
1051 case GITS_CREADR
+ 4:
1052 if (s
->gicv3
->gicd_ctlr
& GICD_CTLR_DS
) {
1053 s
->creadr
= deposit64(s
->creadr
, 32, 32, value
);
1055 /* RO register, ignore the write */
1056 qemu_log_mask(LOG_GUEST_ERROR
,
1057 "%s: invalid guest write to RO register at offset "
1058 TARGET_FMT_plx
"\n", __func__
, offset
);
1061 case GITS_BASER
... GITS_BASER
+ 0x3f:
1063 * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1066 if (!(s
->ctlr
& R_GITS_CTLR_ENABLED_MASK
)) {
1067 index
= (offset
- GITS_BASER
) / 8;
1069 if (s
->baser
[index
] == 0) {
1070 /* Unimplemented GITS_BASERn: RAZ/WI */
1075 value
&= ~GITS_BASER_RO_MASK
;
1076 s
->baser
[index
] &= GITS_BASER_RO_MASK
| MAKE_64BIT_MASK(0, 32);
1077 s
->baser
[index
] |= value
;
1079 value
&= ~GITS_BASER_RO_MASK
;
1080 s
->baser
[index
] &= GITS_BASER_RO_MASK
| MAKE_64BIT_MASK(32, 32);
1081 s
->baser
[index
] |= value
;
1086 case GITS_IDREGS
... GITS_IDREGS
+ 0x2f:
1087 /* RO registers, ignore the write */
1088 qemu_log_mask(LOG_GUEST_ERROR
,
1089 "%s: invalid guest write to RO register at offset "
1090 TARGET_FMT_plx
"\n", __func__
, offset
);
1099 static bool its_readl(GICv3ITSState
*s
, hwaddr offset
,
1100 uint64_t *data
, MemTxAttrs attrs
)
1110 *data
= gicv3_iidr();
1112 case GITS_IDREGS
... GITS_IDREGS
+ 0x2f:
1114 *data
= gicv3_idreg(offset
- GITS_IDREGS
);
1117 *data
= extract64(s
->typer
, 0, 32);
1119 case GITS_TYPER
+ 4:
1120 *data
= extract64(s
->typer
, 32, 32);
1123 *data
= extract64(s
->cbaser
, 0, 32);
1125 case GITS_CBASER
+ 4:
1126 *data
= extract64(s
->cbaser
, 32, 32);
1129 *data
= extract64(s
->creadr
, 0, 32);
1131 case GITS_CREADR
+ 4:
1132 *data
= extract64(s
->creadr
, 32, 32);
1135 *data
= extract64(s
->cwriter
, 0, 32);
1137 case GITS_CWRITER
+ 4:
1138 *data
= extract64(s
->cwriter
, 32, 32);
1140 case GITS_BASER
... GITS_BASER
+ 0x3f:
1141 index
= (offset
- GITS_BASER
) / 8;
1143 *data
= extract64(s
->baser
[index
], 32, 32);
1145 *data
= extract64(s
->baser
[index
], 0, 32);
1155 static bool its_writell(GICv3ITSState
*s
, hwaddr offset
,
1156 uint64_t value
, MemTxAttrs attrs
)
1162 case GITS_BASER
... GITS_BASER
+ 0x3f:
1164 * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1167 if (!(s
->ctlr
& R_GITS_CTLR_ENABLED_MASK
)) {
1168 index
= (offset
- GITS_BASER
) / 8;
1169 if (s
->baser
[index
] == 0) {
1170 /* Unimplemented GITS_BASERn: RAZ/WI */
1173 s
->baser
[index
] &= GITS_BASER_RO_MASK
;
1174 s
->baser
[index
] |= (value
& ~GITS_BASER_RO_MASK
);
1179 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1182 if (!(s
->ctlr
& R_GITS_CTLR_ENABLED_MASK
)) {
1188 s
->cwriter
= value
& ~R_GITS_CWRITER_RETRY_MASK
;
1189 if (s
->cwriter
!= s
->creadr
) {
1194 if (s
->gicv3
->gicd_ctlr
& GICD_CTLR_DS
) {
1195 s
->creadr
= value
& ~R_GITS_CREADR_STALLED_MASK
;
1197 /* RO register, ignore the write */
1198 qemu_log_mask(LOG_GUEST_ERROR
,
1199 "%s: invalid guest write to RO register at offset "
1200 TARGET_FMT_plx
"\n", __func__
, offset
);
1204 /* RO registers, ignore the write */
1205 qemu_log_mask(LOG_GUEST_ERROR
,
1206 "%s: invalid guest write to RO register at offset "
1207 TARGET_FMT_plx
"\n", __func__
, offset
);
1216 static bool its_readll(GICv3ITSState
*s
, hwaddr offset
,
1217 uint64_t *data
, MemTxAttrs attrs
)
1226 case GITS_BASER
... GITS_BASER
+ 0x3f:
1227 index
= (offset
- GITS_BASER
) / 8;
1228 *data
= s
->baser
[index
];
1246 static MemTxResult
gicv3_its_read(void *opaque
, hwaddr offset
, uint64_t *data
,
1247 unsigned size
, MemTxAttrs attrs
)
1249 GICv3ITSState
*s
= (GICv3ITSState
*)opaque
;
1254 result
= its_readl(s
, offset
, data
, attrs
);
1257 result
= its_readll(s
, offset
, data
, attrs
);
1265 qemu_log_mask(LOG_GUEST_ERROR
,
1266 "%s: invalid guest read at offset " TARGET_FMT_plx
1267 "size %u\n", __func__
, offset
, size
);
1268 trace_gicv3_its_badread(offset
, size
);
1270 * The spec requires that reserved registers are RAZ/WI;
1271 * so use false returns from leaf functions as a way to
1272 * trigger the guest-error logging but don't return it to
1273 * the caller, or we'll cause a spurious guest data abort.
1277 trace_gicv3_its_read(offset
, *data
, size
);
1282 static MemTxResult
gicv3_its_write(void *opaque
, hwaddr offset
, uint64_t data
,
1283 unsigned size
, MemTxAttrs attrs
)
1285 GICv3ITSState
*s
= (GICv3ITSState
*)opaque
;
1290 result
= its_writel(s
, offset
, data
, attrs
);
1293 result
= its_writell(s
, offset
, data
, attrs
);
1301 qemu_log_mask(LOG_GUEST_ERROR
,
1302 "%s: invalid guest write at offset " TARGET_FMT_plx
1303 "size %u\n", __func__
, offset
, size
);
1304 trace_gicv3_its_badwrite(offset
, data
, size
);
1306 * The spec requires that reserved registers are RAZ/WI;
1307 * so use false returns from leaf functions as a way to
1308 * trigger the guest-error logging but don't return it to
1309 * the caller, or we'll cause a spurious guest data abort.
1312 trace_gicv3_its_write(offset
, data
, size
);
1317 static const MemoryRegionOps gicv3_its_control_ops
= {
1318 .read_with_attrs
= gicv3_its_read
,
1319 .write_with_attrs
= gicv3_its_write
,
1320 .valid
.min_access_size
= 4,
1321 .valid
.max_access_size
= 8,
1322 .impl
.min_access_size
= 4,
1323 .impl
.max_access_size
= 8,
1324 .endianness
= DEVICE_NATIVE_ENDIAN
,
1327 static const MemoryRegionOps gicv3_its_translation_ops
= {
1328 .read_with_attrs
= gicv3_its_translation_read
,
1329 .write_with_attrs
= gicv3_its_translation_write
,
1330 .valid
.min_access_size
= 2,
1331 .valid
.max_access_size
= 4,
1332 .impl
.min_access_size
= 2,
1333 .impl
.max_access_size
= 4,
1334 .endianness
= DEVICE_NATIVE_ENDIAN
,
1337 static void gicv3_arm_its_realize(DeviceState
*dev
, Error
**errp
)
1339 GICv3ITSState
*s
= ARM_GICV3_ITS_COMMON(dev
);
1342 for (i
= 0; i
< s
->gicv3
->num_cpu
; i
++) {
1343 if (!(s
->gicv3
->cpu
[i
].gicr_typer
& GICR_TYPER_PLPIS
)) {
1344 error_setg(errp
, "Physical LPI not supported by CPU %d", i
);
1349 gicv3_its_init_mmio(s
, &gicv3_its_control_ops
, &gicv3_its_translation_ops
);
1351 /* set the ITS default features supported */
1352 s
->typer
= FIELD_DP64(s
->typer
, GITS_TYPER
, PHYSICAL
, 1);
1353 s
->typer
= FIELD_DP64(s
->typer
, GITS_TYPER
, ITT_ENTRY_SIZE
,
1354 ITS_ITT_ENTRY_SIZE
- 1);
1355 s
->typer
= FIELD_DP64(s
->typer
, GITS_TYPER
, IDBITS
, ITS_IDBITS
);
1356 s
->typer
= FIELD_DP64(s
->typer
, GITS_TYPER
, DEVBITS
, ITS_DEVBITS
);
1357 s
->typer
= FIELD_DP64(s
->typer
, GITS_TYPER
, CIL
, 1);
1358 s
->typer
= FIELD_DP64(s
->typer
, GITS_TYPER
, CIDBITS
, ITS_CIDBITS
);
1361 static void gicv3_its_reset(DeviceState
*dev
)
1363 GICv3ITSState
*s
= ARM_GICV3_ITS_COMMON(dev
);
1364 GICv3ITSClass
*c
= ARM_GICV3_ITS_GET_CLASS(s
);
1366 c
->parent_reset(dev
);
1368 /* Quiescent bit reset to 1 */
1369 s
->ctlr
= FIELD_DP32(s
->ctlr
, GITS_CTLR
, QUIESCENT
, 1);
1372 * setting GITS_BASER0.Type = 0b001 (Device)
1373 * GITS_BASER1.Type = 0b100 (Collection Table)
1374 * GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
1375 * GITS_BASER<0,1>.Page_Size = 64KB
1376 * and default translation table entry size to 16 bytes
1378 s
->baser
[0] = FIELD_DP64(s
->baser
[0], GITS_BASER
, TYPE
,
1379 GITS_BASER_TYPE_DEVICE
);
1380 s
->baser
[0] = FIELD_DP64(s
->baser
[0], GITS_BASER
, PAGESIZE
,
1381 GITS_BASER_PAGESIZE_64K
);
1382 s
->baser
[0] = FIELD_DP64(s
->baser
[0], GITS_BASER
, ENTRYSIZE
,
1385 s
->baser
[1] = FIELD_DP64(s
->baser
[1], GITS_BASER
, TYPE
,
1386 GITS_BASER_TYPE_COLLECTION
);
1387 s
->baser
[1] = FIELD_DP64(s
->baser
[1], GITS_BASER
, PAGESIZE
,
1388 GITS_BASER_PAGESIZE_64K
);
1389 s
->baser
[1] = FIELD_DP64(s
->baser
[1], GITS_BASER
, ENTRYSIZE
,
1393 static void gicv3_its_post_load(GICv3ITSState
*s
)
1395 if (s
->ctlr
& R_GITS_CTLR_ENABLED_MASK
) {
1396 extract_table_params(s
);
1397 extract_cmdq_params(s
);
1401 static Property gicv3_its_props
[] = {
1402 DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState
, gicv3
, "arm-gicv3",
1404 DEFINE_PROP_END_OF_LIST(),
1407 static void gicv3_its_class_init(ObjectClass
*klass
, void *data
)
1409 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1410 GICv3ITSClass
*ic
= ARM_GICV3_ITS_CLASS(klass
);
1411 GICv3ITSCommonClass
*icc
= ARM_GICV3_ITS_COMMON_CLASS(klass
);
1413 dc
->realize
= gicv3_arm_its_realize
;
1414 device_class_set_props(dc
, gicv3_its_props
);
1415 device_class_set_parent_reset(dc
, gicv3_its_reset
, &ic
->parent_reset
);
1416 icc
->post_load
= gicv3_its_post_load
;
1419 static const TypeInfo gicv3_its_info
= {
1420 .name
= TYPE_ARM_GICV3_ITS
,
1421 .parent
= TYPE_ARM_GICV3_ITS_COMMON
,
1422 .instance_size
= sizeof(GICv3ITSState
),
1423 .class_init
= gicv3_its_class_init
,
1424 .class_size
= sizeof(GICv3ITSClass
),
1427 static void gicv3_its_register_types(void)
1429 type_register_static(&gicv3_its_info
);
1432 type_init(gicv3_its_register_types
)