/*
 * ITS emulation for a GICv3-based system
 *
 * Copyright Linaro.org 2021
 *
 * Authors:
 *    Shashi Mallela <shashi.mallela@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
#include "qom/object.h"
#include "qapi/error.h"
typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    ResettablePhases parent_phases;
};
/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;
typedef struct DTEntry {
    bool valid;
    unsigned size;
    uint64_t ittaddr;
} DTEntry;

typedef struct CTEntry {
    bool valid;
    uint32_t rdbase;
} CTEntry;

typedef struct ITEntry {
    bool valid;
    int inttype;
    uint32_t intid;
    uint32_t doorbell;
    uint32_t icid;
    uint32_t vpeid;
} ITEntry;

typedef struct VTEntry {
    bool valid;
    unsigned vptsize;
    uint32_t rdbase;
    uint64_t vptaddr;
} VTEntry;
/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall, keep going because of an error, or keep going because the
 * command was a success.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,
    CMD_CONTINUE = 1,
    CMD_CONTINUE_OK = 2,
} ItsCmdResult;
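
/*
 * A command handler therefore typically reads as a chain of validity
 * checks, each returning CMD_CONTINUE on a bad parameter, with CMD_STALL
 * reserved for memory transaction failures. process_cmdq() below advances
 * GITS_CREADR for both CMD_CONTINUE and CMD_CONTINUE_OK, and sets
 * GITS_CREADR.Stalled only for CMD_STALL.
 */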
/* True if the ITS supports the GICv4 virtual LPI feature */
static bool its_feature_virtual(GICv3ITSState *s)
{
    return s->typer & R_GITS_TYPER_VIRTUAL_MASK;
}
static inline bool intid_in_lpi_range(uint32_t id)
{
    return id >= GICV3_LPI_INTID_START &&
        id < (1 << (GICD_TYPER_IDBITS + 1));
}

static inline bool valid_doorbell(uint32_t id)
{
    /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */
    return id == INTID_SPURIOUS || intid_in_lpi_range(id);
}
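
/*
 * For example: LPI INTIDs architecturally start at 8192
 * (GICV3_LPI_INTID_START), so with the GICD_TYPER_IDBITS value of 15
 * used by this GIC implementation the accepted LPI range is
 * 8192 <= id < 65536.
 */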
static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
    uint64_t result = 0;

    switch (page_sz) {
    case GITS_PAGE_SIZE_4K:
    case GITS_PAGE_SIZE_16K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
        break;

    case GITS_PAGE_SIZE_64K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
        result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
        break;

    default:
        break;
    }
    return result;
}
static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
                                 uint32_t idx, MemTxResult *res)
{
    /*
     * Given a TableDesc describing one of the ITS in-guest-memory
     * tables and an index into it, return the guest address
     * corresponding to that table entry.
     * If there was a memory error reading the L1 table of an
     * indirect table, *res is set accordingly, and we return -1.
     * If the L1 table entry is marked not valid, we return -1 with
     * *res set to MEMTX_OK.
     *
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t l2idx;
    uint64_t l2;
    uint32_t num_l2_entries;

    *res = MEMTX_OK;

    if (!td->indirect) {
        /* Single level table */
        return td->base_addr + idx * td->entry_sz;
    }

    /* Two level table */
    l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);

    l2 = address_space_ldq_le(as,
                              td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
                              MEMTXATTRS_UNSPECIFIED, res);
    if (*res != MEMTX_OK) {
        return -1;
    }
    if (!(l2 & L2_TABLE_VALID_MASK)) {
        return -1;
    }

    num_l2_entries = td->page_sz / td->entry_sz;
    return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}
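
/*
 * Worked example for the two-level case, assuming the 8-byte entry
 * sizes this implementation sets up (so entry_sz == L1TABLE_ENTRY_SIZE):
 * with 64K pages each L2 page holds 65536 / 8 == 8192 entries, so an
 * idx of 10000 gives l2idx == 1, and the entry lives at byte offset
 * (10000 % 8192) * 8 within the L2 page that L1 entry 1 points to.
 */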
/*
 * Read the Collection Table entry at index @icid. On success (including
 * successfully determining that there is no valid CTE for this index),
 * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    uint64_t cteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid CTE, or a memory error */
        cte->valid = false;
        goto out;
    }

    cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    cte->valid = FIELD_EX64(cteval, CTE, VALID);
    cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_cte_read_fault(icid);
    } else {
        trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
    }
    return res;
}
/*
 * Update the Interrupt Table entry at index @eventid in the table specified
 * by the DTE @dte. Returns true on success, false if there was a memory
 * access error.
 */
static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
                       const ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
    uint64_t itel = 0;
    uint32_t iteh = 0;

    trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
                              ite->inttype, ite->intid, ite->icid,
                              ite->vpeid, ite->doorbell);

    if (ite->valid) {
        itel = FIELD_DP64(itel, ITE_L, VALID, 1);
        itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
        itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
        itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
        itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
        iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
    }

    address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        return false;
    }
    address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
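
/*
 * Note the in-memory ITE format used here: an 8-byte ITE_L word followed
 * by a 4-byte ITE_H word (holding the doorbell) at offset 8, which is why
 * each ITT slot must span at least 12 bytes (ITS_ITT_ENTRY_SIZE). The
 * ITT entry format is IMPDEF; this layout is QEMU's choice.
 */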
/*
 * Read the Interrupt Table entry at index @eventid from the table specified
 * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
 * struct @ite accordingly. If there is an error reading memory then we return
 * the error code.
 */
static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
                           const DTEntry *dte, ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t itel;
    uint32_t iteh;
    hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;

    itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    ite->valid = FIELD_EX64(itel, ITE_L, VALID);
    ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
    ite->intid = FIELD_EX64(itel, ITE_L, INTID);
    ite->icid = FIELD_EX64(itel, ITE_L, ICID);
    ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
    ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
    trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
                             ite->inttype, ite->intid, ite->icid,
                             ite->vpeid, ite->doorbell);
    return MEMTX_OK;
}
/*
 * Read the Device Table entry at index @devid. On success (including
 * successfully determining that there is no valid DTE for this index),
 * we return MEMTX_OK and populate the DTEntry struct accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
{
    MemTxResult res = MEMTX_OK;
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    uint64_t dteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid DTE, or a memory error */
        dte->valid = false;
        goto out;
    }
    dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    dte->valid = FIELD_EX64(dteval, DTE, VALID);
    dte->size = FIELD_EX64(dteval, DTE, SIZE);
    /* DTE word field stores bits [51:8] of the ITT address */
    dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_dte_read_fault(devid);
    } else {
        trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
    }
    return res;
}
/*
 * Read the vPE Table entry at index @vpeid. On success (including
 * successfully determining that there is no valid entry for this index),
 * we return MEMTX_OK and populate the VTEntry struct accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_vte(GICv3ITSState *s, uint32_t vpeid, VTEntry *vte)
{
    MemTxResult res = MEMTX_OK;
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
    uint64_t vteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid VTE, or a memory error */
        vte->valid = false;
        goto out;
    }
    vteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    vte->valid = FIELD_EX64(vteval, VTE, VALID);
    vte->vptsize = FIELD_EX64(vteval, VTE, VPTSIZE);
    vte->vptaddr = FIELD_EX64(vteval, VTE, VPTADDR);
    vte->rdbase = FIELD_EX64(vteval, VTE, RDBASE);
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_vte_read_fault(vpeid);
    } else {
        trace_gicv3_its_vte_read(vpeid, vte->valid, vte->vptsize,
                                 vte->vptaddr, vte->rdbase);
    }
    return res;
}
/*
 * Given a (DeviceID, EventID), look up the corresponding ITE, including
 * checking for the various invalid-value cases. If we find a valid ITE,
 * fill in @ite and @dte and return CMD_CONTINUE_OK. Otherwise return
 * CMD_STALL or CMD_CONTINUE as appropriate (and the contents of @ite
 * should not be relied on).
 *
 * The string @who is purely for the LOG_GUEST_ERROR messages,
 * and should indicate the name of the calling function or similar.
 */
static ItsCmdResult lookup_ite(GICv3ITSState *s, const char *who,
                               uint32_t devid, uint32_t eventid, ITEntry *ite,
                               DTEntry *dte)
{
    uint64_t num_eventids;

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d\n",
                      who, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!dte->valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", who, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (dte->size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n", who, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (get_ite(s, eventid, dte, ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!ite->valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n", who);
        return CMD_CONTINUE;
    }

    return CMD_CONTINUE_OK;
}
/*
 * Given an ICID, look up the corresponding CTE, including checking for various
 * invalid-value cases. If we find a valid CTE, fill in @cte and return
 * CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE (and the
 * contents of @cte should not be relied on).
 *
 * The string @who is purely for the LOG_GUEST_ERROR messages,
 * and should indicate the name of the calling function or similar.
 */
static ItsCmdResult lookup_cte(GICv3ITSState *s, const char *who,
                               uint32_t icid, CTEntry *cte)
{
    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid ICID 0x%x\n", who, icid);
        return CMD_CONTINUE;
    }
    if (get_cte(s, icid, cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!cte->valid) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid CTE\n", who);
        return CMD_CONTINUE;
    }
    if (cte->rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }
    return CMD_CONTINUE_OK;
}
/*
 * Given a VPEID, look up the corresponding VTE, including checking
 * for various invalid-value cases. If we find a valid VTE, fill in @vte
 * and return CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE
 * (and the contents of @vte should not be relied on).
 *
 * The string @who is purely for the LOG_GUEST_ERROR messages,
 * and should indicate the name of the calling function or similar.
 */
static ItsCmdResult lookup_vte(GICv3ITSState *s, const char *who,
                               uint32_t vpeid, VTEntry *vte)
{
    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid VPEID 0x%x\n", who, vpeid);
        return CMD_CONTINUE;
    }

    if (get_vte(s, vpeid, vte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!vte->valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid VTE for VPEID 0x%x\n", who, vpeid);
        return CMD_CONTINUE;
    }

    if (vte->rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }
    return CMD_CONTINUE_OK;
}
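
/*
 * Taken together, the lookup_* helpers above implement the full
 * translation chain: (DeviceID, EventID) selects a DTE and then an ITE;
 * a physical ITE's ICID selects a CTE naming the target redistributor,
 * while a virtual ITE's vPEID selects a VTE naming the redistributor
 * and the virtual LPI pending table.
 */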
static ItsCmdResult process_its_cmd_phys(GICv3ITSState *s, const ITEntry *ite,
                                         int irqlevel)
{
    CTEntry cte;
    ItsCmdResult cmdres;

    cmdres = lookup_cte(s, __func__, ite->icid, &cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite->intid, irqlevel);
    return CMD_CONTINUE_OK;
}
static ItsCmdResult process_its_cmd_virt(GICv3ITSState *s, const ITEntry *ite,
                                         int irqlevel)
{
    VTEntry vte;
    ItsCmdResult cmdres;

    cmdres = lookup_vte(s, __func__, ite->vpeid, &vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (!intid_in_lpi_range(ite->intid) ||
        ite->intid >= (1ULL << (vte.vptsize + 1))) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: intid 0x%x out of range\n",
                      __func__, ite->intid);
        return CMD_CONTINUE;
    }

    /*
     * For QEMU the actual pending of the vLPI is handled in the
     * redistributor code
     */
    gicv3_redist_process_vlpi(&s->gicv3->cpu[vte.rdbase], ite->intid,
                              vte.vptaddr << 16, ite->doorbell, irqlevel);
    return CMD_CONTINUE_OK;
}
/*
 * This function handles the processing of the following commands, based
 * on the ItsCmdType parameter passed in:
 * 1. triggering of lpi interrupt translation via ITS INT command
 * 2. triggering of lpi interrupt translation via gits_translater register
 * 3. handling of ITS CLEAR command
 * 4. handling of ITS DISCARD command
 */
static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
                                       uint32_t eventid, ItsCmdType cmd)
{
    DTEntry dte;
    ITEntry ite;
    ItsCmdResult cmdres;
    int irqlevel;

    cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    irqlevel = (cmd == CLEAR || cmd == DISCARD) ? 0 : 1;

    switch (ite.inttype) {
    case ITE_INTTYPE_PHYSICAL:
        cmdres = process_its_cmd_phys(s, &ite, irqlevel);
        break;
    case ITE_INTTYPE_VIRTUAL:
        if (!its_feature_virtual(s)) {
            /* Can't happen unless guest is illegally writing to table memory */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid type %d in ITE (table corrupted?)\n",
                          __func__, ite.inttype);
            return CMD_CONTINUE;
        }
        cmdres = process_its_cmd_virt(s, &ite, irqlevel);
        break;
    default:
        g_assert_not_reached();
    }

    if (cmdres == CMD_CONTINUE_OK && cmd == DISCARD) {
        ITEntry ite = {};
        /* remove mapping from interrupt translation table */
        ite.valid = false;
        return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
    }
    return CMD_CONTINUE_OK;
}
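
/*
 * Note that the GITS_TRANSLATER write path also funnels through
 * do_process_its_cmd(), with cmd == NONE: that behaves like INT
 * (irqlevel is 1 and no ITE is removed).
 */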
static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
                                    ItsCmdType cmd)
{
    uint32_t devid, eventid;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    switch (cmd) {
    case INTERRUPT:
        trace_gicv3_its_cmd_int(devid, eventid);
        break;
    case CLEAR:
        trace_gicv3_its_cmd_clear(devid, eventid);
        break;
    case DISCARD:
        trace_gicv3_its_cmd_discard(devid, eventid);
        break;
    default:
        g_assert_not_reached();
    }
    return do_process_its_cmd(s, devid, eventid, cmd);
}
static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
                                  bool ignore_pInt)
{
    uint32_t devid, eventid;
    uint32_t pIntid = 0;
    uint64_t num_eventids;
    uint16_t icid = 0;
    DTEntry dte;
    ITEntry ite;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    icid = cmdpkt[2] & ICID_MASK;

    if (ignore_pInt) {
        pIntid = eventid;
        trace_gicv3_its_cmd_mapi(devid, eventid, icid);
    } else {
        pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
        trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
    }

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d\n",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    num_eventids = 1ULL << (dte.size + 1);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x >= 0x%x\n",
                      __func__, icid, s->ct.num_entries);
        return CMD_CONTINUE;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: no valid DTE for devid 0x%x\n", __func__, devid);
        return CMD_CONTINUE;
    }

    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (!intid_in_lpi_range(pIntid)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
        return CMD_CONTINUE;
    }

    /* add ite entry to interrupt translation table */
    ite.valid = true;
    ite.inttype = ITE_INTTYPE_PHYSICAL;
    ite.intid = pIntid;
    ite.icid = icid;
    ite.doorbell = INTID_SPURIOUS;
    ite.vpeid = 0;
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
}
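
/*
 * For reference, the MAPTI packet layout parsed above (per the GICv3
 * architecture spec): doubleword 0 carries the DeviceID in bits [63:32],
 * doubleword 1 the EventID in [31:0] and pINTID in [63:32], and
 * doubleword 2 the ICID in [15:0]. MAPI is identical except that the
 * pINTID field is absent and is taken to equal the EventID.
 */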
static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt,
                                   bool ignore_vintid)
{
    uint32_t devid, eventid, vintid, doorbell, vpeid;
    uint32_t num_eventids;
    DTEntry dte;
    ITEntry ite;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID);
    vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID);
    doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL);
    if (ignore_vintid) {
        vintid = eventid;
        trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell);
    } else {
        vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID);
        trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell);
    }

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: no entry in device table for DeviceID 0x%x\n",
                      __func__, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (dte.size + 1);

    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: EventID 0x%x too large for DeviceID 0x%x "
                      "(must be less than 0x%x)\n",
                      __func__, eventid, devid, num_eventids);
        return CMD_CONTINUE;
    }
    if (!intid_in_lpi_range(vintid)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VIntID 0x%x not a valid LPI\n",
                      __func__, vintid);
        return CMD_CONTINUE;
    }
    if (!valid_doorbell(doorbell)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Doorbell %d not 1023 and not a valid LPI\n",
                      __func__, doorbell);
        return CMD_CONTINUE;
    }
    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
                      __func__, vpeid, s->vpet.num_entries);
        return CMD_CONTINUE;
    }
    /* add ite entry to interrupt translation table */
    ite.valid = true;
    ite.inttype = ITE_INTTYPE_VIRTUAL;
    ite.intid = vintid;
    ite.icid = 0;
    ite.doorbell = doorbell;
    ite.vpeid = vpeid;
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
}
/*
 * Update the Collection Table entry for @icid to @cte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t cteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);

    if (cte->valid) {
        /* add mapping entry to collection table */
        cteval = FIELD_DP64(cteval, CTE, VALID, 1);
        cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
    }

    entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }

    address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint16_t icid;
    CTEntry cte;

    icid = cmdpkt[2] & ICID_MASK;
    cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
    if (cte.valid) {
        cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
        cte.rdbase &= RDBASE_PROCNUM_MASK;
    } else {
        cte.rdbase = 0;
    }
    trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
        return CMD_CONTINUE;
    }
    if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
        return CMD_CONTINUE;
    }

    return update_cte(s, icid, &cte) ? CMD_CONTINUE_OK : CMD_STALL;
}
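
/*
 * RDBASE here is handled in "processor number" format: this ITS
 * reports GITS_TYPER.PTA == 0, so the value is treated as an index
 * into s->gicv3->cpu[] rather than as a physical redistributor
 * base address.
 */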
/*
 * Update the Device Table entry for @devid to @dte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t dteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);

    if (dte->valid) {
        /* add mapping entry to device table */
        dteval = FIELD_DP64(dteval, DTE, VALID, 1);
        dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
        dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
    }

    entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid;
    DTEntry dte;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    dte.size = cmdpkt[1] & SIZE_MASK;
    dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
    dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;

    trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
                      devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid size %d\n", dte.size);
        return CMD_CONTINUE;
    }

    return update_dte(s, devid, &dte) ? CMD_CONTINUE_OK : CMD_STALL;
}
static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint64_t rd1, rd2;

    rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
    rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);

    trace_gicv3_its_cmd_movall(rd1, rd2);

    if (rd1 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE1 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd1, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }
    if (rd2 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE2 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd2, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }

    if (rd1 == rd2) {
        /* Move to same target must succeed as a no-op */
        return CMD_CONTINUE_OK;
    }

    /* Move all pending LPIs from redistributor 1 to redistributor 2 */
    gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);

    return CMD_CONTINUE_OK;
}
static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    uint16_t new_icid;
    DTEntry dte;
    CTEntry old_cte, new_cte;
    ITEntry old_ite;
    ItsCmdResult cmdres;

    devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
    new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);

    trace_gicv3_its_cmd_movi(devid, eventid, new_icid);

    cmdres = lookup_ite(s, __func__, devid, eventid, &old_ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    cmdres = lookup_cte(s, __func__, old_ite.icid, &old_cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    cmdres = lookup_cte(s, __func__, new_icid, &new_cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (old_cte.rdbase != new_cte.rdbase) {
        /* Move the LPI from the old redistributor to the new one */
        gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
                             &s->gicv3->cpu[new_cte.rdbase],
                             old_ite.intid);
    }

    /* Update the ICID field in the interrupt translation table entry */
    old_ite.icid = new_icid;
    return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE_OK : CMD_STALL;
}
/*
 * Update the vPE Table entry at index @vpeid with the entry @vte.
 * Returns true on success, false if there was a memory access error.
 */
static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t vteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_vte_write(vpeid, vte->valid, vte->vptsize, vte->vptaddr,
                              vte->rdbase);

    if (vte->valid) {
        vteval = FIELD_DP64(vteval, VTE, VALID, 1);
        vteval = FIELD_DP64(vteval, VTE, VPTSIZE, vte->vptsize);
        vteval = FIELD_DP64(vteval, VTE, VPTADDR, vte->vptaddr);
        vteval = FIELD_DP64(vteval, VTE, RDBASE, vte->rdbase);
    }

    entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
    if (res != MEMTX_OK) {
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, vteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VTEntry vte;
    uint32_t vpeid;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    vpeid = FIELD_EX64(cmdpkt[1], VMAPP_1, VPEID);
    vte.rdbase = FIELD_EX64(cmdpkt[2], VMAPP_2, RDBASE);
    vte.valid = FIELD_EX64(cmdpkt[2], VMAPP_2, V);
    vte.vptsize = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTSIZE);
    vte.vptaddr = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTADDR);

    trace_gicv3_its_cmd_vmapp(vpeid, vte.rdbase, vte.valid,
                              vte.vptaddr, vte.vptsize);

    /*
     * For GICv4.0 the VPT_size field is only 5 bits, whereas we
     * define our field macros to include the full GICv4.1 8 bits.
     * The range check on VPT_size will catch the cases where
     * the guest set the RES0-in-GICv4.0 bits [7:6].
     */
    if (vte.vptsize > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid VPT_size 0x%x\n", __func__, vte.vptsize);
        return CMD_CONTINUE;
    }

    if (vte.valid && vte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid rdbase 0x%x\n", __func__, vte.rdbase);
        return CMD_CONTINUE;
    }

    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
                      __func__, vpeid, s->vpet.num_entries);
        return CMD_CONTINUE;
    }

    return update_vte(s, vpeid, &vte) ? CMD_CONTINUE_OK : CMD_STALL;
}
typedef struct VmovpCallbackData {
    uint64_t rdbase;
    uint32_t vpeid;
    /*
     * Overall command result. If more than one callback finds an
     * error, STALL beats CONTINUE.
     */
    ItsCmdResult result;
} VmovpCallbackData;

static void vmovp_callback(gpointer data, gpointer opaque)
{
    /*
     * This function is called to update the VPEID field in a VPE
     * table entry for this ITS. This might be because of a VMOVP
     * command executed on any ITS that is connected to the same GIC
     * as this ITS. We need to read the VPE table entry for the VPEID
     * and update its RDBASE field.
     */
    GICv3ITSState *s = data;
    VmovpCallbackData *cbdata = opaque;
    VTEntry vte;
    ItsCmdResult cmdres;

    cmdres = lookup_vte(s, __func__, cbdata->vpeid, &vte);
    switch (cmdres) {
    case CMD_STALL:
        cbdata->result = CMD_STALL;
        return;
    case CMD_CONTINUE:
        if (cbdata->result != CMD_STALL) {
            cbdata->result = CMD_CONTINUE;
        }
        return;
    case CMD_CONTINUE_OK:
        break;
    }

    vte.rdbase = cbdata->rdbase;
    if (!update_vte(s, cbdata->vpeid, &vte)) {
        cbdata->result = CMD_STALL;
    }
}
static ItsCmdResult process_vmovp(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VmovpCallbackData cbdata;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    cbdata.vpeid = FIELD_EX64(cmdpkt[1], VMOVP_1, VPEID);
    cbdata.rdbase = FIELD_EX64(cmdpkt[2], VMOVP_2, RDBASE);

    trace_gicv3_its_cmd_vmovp(cbdata.vpeid, cbdata.rdbase);

    if (cbdata.rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }

    /*
     * Our ITS implementation reports GITS_TYPER.VMOVP == 1, which means
     * that when the VMOVP command is executed on an ITS to change the
     * VPEID field in a VPE table entry the change must be propagated
     * to all the ITSes connected to the same GIC.
     */
    cbdata.result = CMD_CONTINUE_OK;
    gicv3_foreach_its(s->gicv3, vmovp_callback, &cbdata);
    return cbdata.result;
}
static ItsCmdResult process_vmovi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid, vpeid, doorbell;
    bool doorbell_valid;
    DTEntry dte;
    ITEntry ite;
    VTEntry old_vte, new_vte;
    ItsCmdResult cmdres;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    devid = FIELD_EX64(cmdpkt[0], VMOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], VMOVI_1, EVENTID);
    vpeid = FIELD_EX64(cmdpkt[1], VMOVI_1, VPEID);
    doorbell_valid = FIELD_EX64(cmdpkt[2], VMOVI_2, D);
    doorbell = FIELD_EX64(cmdpkt[2], VMOVI_2, DOORBELL);

    trace_gicv3_its_cmd_vmovi(devid, eventid, vpeid, doorbell_valid, doorbell);

    if (doorbell_valid && !valid_doorbell(doorbell)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid doorbell 0x%x\n", __func__, doorbell);
        return CMD_CONTINUE;
    }

    cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (ite.inttype != ITE_INTTYPE_VIRTUAL) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: ITE is not for virtual interrupt\n",
                      __func__);
        return CMD_CONTINUE;
    }

    cmdres = lookup_vte(s, __func__, ite.vpeid, &old_vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    cmdres = lookup_vte(s, __func__, vpeid, &new_vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (!intid_in_lpi_range(ite.intid) ||
        ite.intid >= (1ULL << (old_vte.vptsize + 1)) ||
        ite.intid >= (1ULL << (new_vte.vptsize + 1))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: ITE intid 0x%x out of range\n",
                      __func__, ite.intid);
        return CMD_CONTINUE;
    }

    ite.vpeid = vpeid;
    if (doorbell_valid) {
        ite.doorbell = doorbell;
    }

    /*
     * Move the LPI from the old redistributor to the new one. We don't
     * need to do anything if the guest somehow specified the
     * same pending table for source and destination.
     */
    if (old_vte.vptaddr != new_vte.vptaddr) {
        gicv3_redist_mov_vlpi(&s->gicv3->cpu[old_vte.rdbase],
                              old_vte.vptaddr << 16,
                              &s->gicv3->cpu[new_vte.rdbase],
                              new_vte.vptaddr << 16,
                              ite.intid,
                              ite.doorbell);
    }

    /* Update the ITE to the new VPEID and possibly doorbell values */
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
}
static ItsCmdResult process_vinvall(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VTEntry vte;
    uint32_t vpeid;
    ItsCmdResult cmdres;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    vpeid = FIELD_EX64(cmdpkt[1], VINVALL_1, VPEID);

    trace_gicv3_its_cmd_vinvall(vpeid);

    cmdres = lookup_vte(s, __func__, vpeid, &vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    gicv3_redist_vinvall(&s->gicv3->cpu[vte.rdbase], vte.vptaddr << 16);
    return CMD_CONTINUE_OK;
}
static ItsCmdResult process_inv(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    ITEntry ite;
    DTEntry dte;
    CTEntry cte;
    VTEntry vte;
    ItsCmdResult cmdres;

    devid = FIELD_EX64(cmdpkt[0], INV_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], INV_1, EVENTID);

    trace_gicv3_its_cmd_inv(devid, eventid);

    cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    switch (ite.inttype) {
    case ITE_INTTYPE_PHYSICAL:
        cmdres = lookup_cte(s, __func__, ite.icid, &cte);
        if (cmdres != CMD_CONTINUE_OK) {
            return cmdres;
        }
        gicv3_redist_inv_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid);
        break;
    case ITE_INTTYPE_VIRTUAL:
        if (!its_feature_virtual(s)) {
            /* Can't happen unless guest is illegally writing to table memory */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid type %d in ITE (table corrupted?)\n",
                          __func__, ite.inttype);
            return CMD_CONTINUE;
        }

        cmdres = lookup_vte(s, __func__, ite.vpeid, &vte);
        if (cmdres != CMD_CONTINUE_OK) {
            return cmdres;
        }
        if (!intid_in_lpi_range(ite.intid) ||
            ite.intid >= (1ULL << (vte.vptsize + 1))) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: intid 0x%x out of range\n",
                          __func__, ite.intid);
            return CMD_CONTINUE;
        }
        gicv3_redist_inv_vlpi(&s->gicv3->cpu[vte.rdbase], ite.intid,
                              vte.vptaddr << 16);
        break;
    default:
        g_assert_not_reached();
    }

    return CMD_CONTINUE_OK;
}
/*
 * Current implementation blocks until all
 * commands are processed
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    AddressSpace *as = &s->gicv3->dma_as;
    uint8_t cmd;
    int i;

    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE_OK;
        void *hostmem;
        hwaddr buflen;
        uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);

        buflen = GITS_CMDQ_ENTRY_SIZE;
        hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
                                    &buflen, false, MEMTXATTRS_UNSPECIFIED);
        if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
            if (hostmem) {
                address_space_unmap(as, hostmem, buflen, false, 0);
            }
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }
        for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
            cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
        }
        address_space_unmap(as, hostmem, buflen, false, 0);

        cmd = cmdpkt[0] & CMD_MASK;

        trace_gicv3_its_process_command(rd_offset, cmd);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, cmdpkt, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, cmdpkt, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            trace_gicv3_its_cmd_sync();
            break;
        case GITS_CMD_VSYNC:
            /*
             * VSYNC also is a nop, because our implementation is always
             * in sync.
             */
            if (!its_feature_virtual(s)) {
                result = CMD_CONTINUE;
                break;
            }
            trace_gicv3_its_cmd_vsync();
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, cmdpkt);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cmdpkt);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, cmdpkt, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, cmdpkt, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, cmdpkt, DISCARD);
            break;
        case GITS_CMD_INV:
            result = process_inv(s, cmdpkt);
            break;
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * only the calculated LPI priority information. We only
             * need to trigger an LPI priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             * INVALL operates on a collection specified by ICID so
             * it only affects physical LPIs.
             */
            trace_gicv3_its_cmd_invall();
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        case GITS_CMD_MOVI:
            result = process_movi(s, cmdpkt);
            break;
        case GITS_CMD_MOVALL:
            result = process_movall(s, cmdpkt);
            break;
        case GITS_CMD_VMAPTI:
            result = process_vmapti(s, cmdpkt, false);
            break;
        case GITS_CMD_VMAPI:
            result = process_vmapti(s, cmdpkt, true);
            break;
        case GITS_CMD_VMAPP:
            result = process_vmapp(s, cmdpkt);
            break;
        case GITS_CMD_VMOVP:
            result = process_vmovp(s, cmdpkt);
            break;
        case GITS_CMD_VMOVI:
            result = process_vmovi(s, cmdpkt);
            break;
        case GITS_CMD_VINVALL:
            result = process_vinvall(s, cmdpkt);
            break;
        default:
            trace_gicv3_its_cmd_unknown(cmd);
            break;
        }
        if (result != CMD_STALL) {
            /* CMD_CONTINUE or CMD_CONTINUE_OK */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}
/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (like base_addr, size etc) from GITS_BASER register.
 * It is called during ITS enable and also during post_load migration
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        case GITS_BASER_TYPE_VPE:
            td = &s->vpet;
            /*
             * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
             * implementation to implement fewer bits and report this
             * via GICD_TYPER2.)
             */
            idbits = 16;
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         * The specification makes it UNPREDICTABLE to enable the ITS without
         * marking each BASER<n> as valid. We choose to handle these as if
         * the table was zero-sized, so commands using the table will fail
         * and interrupts requested via GITS_TRANSLATER writes will be ignored.
         * This happens automatically by leaving the num_entries field at
         * zero, which will be caught by the bounds checks we have before
         * every table lookup anyway.
         */
        if (!FIELD_EX64(value, GITS_BASER, VALID)) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            td->num_entries = (((num_pages * page_sz) /
                                  L1TABLE_ENTRY_SIZE) *
                                 (page_sz / td->entry_sz));
        }
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}
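
/*
 * Worked example of the sizing arithmetic above: a flat table of two
 * 64K pages with 8-byte entries gives (2 * 65536) / 8 == 16384 entries;
 * the same BASER value with Indirect set instead describes
 * (2 * 65536) / 8 L1 pointers, each covering 65536 / 8 == 8192 entries,
 * before the MIN() clamp to the 2^idbits ID space.
 */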
static void extract_cmdq_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint64_t value = s->cbaser;

    num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;

    memset(&s->cq, 0, sizeof(s->cq));

    if (FIELD_EX64(value, GITS_CBASER, VALID)) {
        s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
                            GITS_CMDQ_ENTRY_SIZE;
        s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
        s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
    }
}
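
/*
 * For example, GITS_CBASER.Size == 1 means a two-page (8KB) command
 * queue, i.e. (2 * 4096) / 32 == 256 command slots, each slot being
 * one GITS_CMDQ_ENTRY_SIZE (32-byte) command packet.
 */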
static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
                                              uint64_t *data, unsigned size,
                                              MemTxAttrs attrs)
{
    /*
     * GITS_TRANSLATER is write-only, and all other addresses
     * in the interrupt translation space frame are RES0.
     */
    *data = 0;
    return MEMTX_OK;
}
static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;

    trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
            result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}
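
/*
 * This is the MSI path: a peripheral (for example a PCI device) does a
 * data write to GITS_TRANSLATER, the bus-provided requester ID becomes
 * the DeviceID, and the written data is the EventID. The translation
 * then proceeds exactly as for the INT command.
 */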
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(s->gicv3, offset - GITS_IDREGS, GICV3_PIDR0_ITS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        *data = s->baser[index];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}
static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_readl(s, offset, data, attrs);
        break;
    case 8:
        result = its_readll(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badread(offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        *data = 0;
    } else {
        trace_gicv3_its_read(offset, *data, size);
    }
    return MEMTX_OK;
}
static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badwrite(offset, data, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    } else {
        trace_gicv3_its_write(offset, data, size);
    }
    return MEMTX_OK;
}
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
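
/*
 * Note the translation region accepts 2-byte as well as 4-byte accesses:
 * the GICv3 architecture allows GITS_TRANSLATER to be written with
 * 16-bit as well as 32-bit writes, hence min_access_size of 2 here.
 */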
static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_add_its(s->gicv3, dev);

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
    if (s->gicv3->revision >= 4) {
        /* Our VMOVP handles cross-ITS synchronization itself */
        s->typer = FIELD_DP64(s->typer, GITS_TYPER, VMOVP, 1);
        s->typer = FIELD_DP64(s->typer, GITS_TYPER, VIRTUAL, 1);
    }
}
static void gicv3_its_reset_hold(Object *obj)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(obj);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    if (c->parent_phases.hold) {
        c->parent_phases.hold(obj);
    }

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later
     *         GITS_BASER<n>.Type, where n = 3 to 7, are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);

    if (its_feature_virtual(s)) {
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
                                 GITS_BASER_TYPE_VPE);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
                                 GITS_BASER_PAGESIZE_64K);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
                                 GITS_VPE_SIZE - 1);
    }
}
static void gicv3_its_post_load(GICv3ITSState *s)
{
    if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
        extract_table_params(s);
        extract_cmdq_params(s);
    }
}
static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};

static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    resettable_class_set_parent_phases(rc, NULL, gicv3_its_reset_hold, NULL,
                                       &ic->parent_phases);
    icc->post_load = gicv3_its_post_load;
}

static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};

static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)