block_int-common.h: split function pointers in BdrvChildClass
[qemu.git] / hw / intc / arm_gicv3_its.c
blob4f598d3c14faee1a746bc54f785fad24a6f1a5ca
1 /*
2 * ITS emulation for a GICv3-based system
4 * Copyright Linaro.org 2021
6 * Authors:
7 * Shashi Mallela <shashi.mallela@linaro.org>
9 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10 * option) any later version. See the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
23 typedef struct GICv3ITSClass GICv3ITSClass;
24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
25 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
26 ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
28 struct GICv3ITSClass {
29 GICv3ITSCommonClass parent_class;
30 void (*parent_reset)(DeviceState *dev);
/*
 * Internal enum distinguishing how an LPI operation was requested:
 * via the ITS command queue, or via a write to GITS_TRANSLATER.
 */
typedef enum ItsCmdType {
    NONE = 0,      /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,     /* ITS CLEAR command */
    DISCARD = 2,   /* ITS DISCARD command */
    INTERRUPT = 3, /* ITS INT command */
} ItsCmdType;
/* In-memory Device Table entry, decoded into host-friendly form */
typedef struct DTEntry {
    bool valid;       /* DTE Valid bit */
    unsigned size;    /* number of EventID bits supported, minus one */
    uint64_t ittaddr; /* guest physical address of the device's ITT */
} DTEntry;
/* In-memory Collection Table entry, decoded into host-friendly form */
typedef struct CTEntry {
    bool valid;      /* CTE Valid bit */
    uint32_t rdbase; /* target redistributor (interpreted as a CPU index) */
} CTEntry;
/* In-memory Interrupt Translation Table entry, decoded form */
typedef struct ITEntry {
    bool valid;        /* ITE Valid bit */
    int inttype;       /* interrupt type (e.g. ITE_INTTYPE_PHYSICAL) */
    uint32_t intid;    /* the LPI interrupt ID */
    uint32_t doorbell; /* doorbell interrupt for virtual LPIs */
    uint32_t icid;     /* collection this event is mapped to */
    uint32_t vpeid;    /* virtual PE ID (unused for physical LPIs) */
} ITEntry;
/*
 * For a malformed command the ITS spec permits a range of CONSTRAINED
 * UNPREDICTABLE behaviours, including both "stall processing of the
 * command queue" and "ignore this command, and keep processing the
 * queue". Our implementation stalls on a memory transaction error
 * while reading the command packet, but ignores the command and keeps
 * going when the command's parameters are bad.
 *
 * Each process_* command handler returns an ItsCmdResult telling
 * process_cmdq() whether to stall or to continue with the next command.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,
    CMD_CONTINUE = 1,
} ItsCmdResult;
82 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
84 uint64_t result = 0;
86 switch (page_sz) {
87 case GITS_PAGE_SIZE_4K:
88 case GITS_PAGE_SIZE_16K:
89 result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
90 break;
92 case GITS_PAGE_SIZE_64K:
93 result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
94 result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
95 break;
97 default:
98 break;
100 return result;
103 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
104 uint32_t idx, MemTxResult *res)
107 * Given a TableDesc describing one of the ITS in-guest-memory
108 * tables and an index into it, return the guest address
109 * corresponding to that table entry.
110 * If there was a memory error reading the L1 table of an
111 * indirect table, *res is set accordingly, and we return -1.
112 * If the L1 table entry is marked not valid, we return -1 with
113 * *res set to MEMTX_OK.
115 * The specification defines the format of level 1 entries of a
116 * 2-level table, but the format of level 2 entries and the format
117 * of flat-mapped tables is IMPDEF.
119 AddressSpace *as = &s->gicv3->dma_as;
120 uint32_t l2idx;
121 uint64_t l2;
122 uint32_t num_l2_entries;
124 *res = MEMTX_OK;
126 if (!td->indirect) {
127 /* Single level table */
128 return td->base_addr + idx * td->entry_sz;
131 /* Two level table */
132 l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
134 l2 = address_space_ldq_le(as,
135 td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
136 MEMTXATTRS_UNSPECIFIED, res);
137 if (*res != MEMTX_OK) {
138 return -1;
140 if (!(l2 & L2_TABLE_VALID_MASK)) {
141 return -1;
144 num_l2_entries = td->page_sz / td->entry_sz;
145 return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
149 * Read the Collection Table entry at index @icid. On success (including
150 * successfully determining that there is no valid CTE for this index),
151 * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
152 * If there is an error reading memory then we return the error code.
154 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
156 AddressSpace *as = &s->gicv3->dma_as;
157 MemTxResult res = MEMTX_OK;
158 uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
159 uint64_t cteval;
161 if (entry_addr == -1) {
162 /* No L2 table entry, i.e. no valid CTE, or a memory error */
163 cte->valid = false;
164 return res;
167 cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
168 if (res != MEMTX_OK) {
169 return res;
171 cte->valid = FIELD_EX64(cteval, CTE, VALID);
172 cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
173 return MEMTX_OK;
177 * Update the Interrupt Table entry at index @evinted in the table specified
178 * by the dte @dte. Returns true on success, false if there was a memory
179 * access error.
181 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
182 const ITEntry *ite)
184 AddressSpace *as = &s->gicv3->dma_as;
185 MemTxResult res = MEMTX_OK;
186 hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
187 uint64_t itel = 0;
188 uint32_t iteh = 0;
190 if (ite->valid) {
191 itel = FIELD_DP64(itel, ITE_L, VALID, 1);
192 itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
193 itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
194 itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
195 itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
196 iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
199 address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
200 if (res != MEMTX_OK) {
201 return false;
203 address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
204 return res == MEMTX_OK;
208 * Read the Interrupt Table entry at index @eventid from the table specified
209 * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
210 * struct @ite accordingly. If there is an error reading memory then we return
211 * the error code.
213 static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
214 const DTEntry *dte, ITEntry *ite)
216 AddressSpace *as = &s->gicv3->dma_as;
217 MemTxResult res = MEMTX_OK;
218 uint64_t itel;
219 uint32_t iteh;
220 hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
222 itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
223 if (res != MEMTX_OK) {
224 return res;
227 iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
228 if (res != MEMTX_OK) {
229 return res;
232 ite->valid = FIELD_EX64(itel, ITE_L, VALID);
233 ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
234 ite->intid = FIELD_EX64(itel, ITE_L, INTID);
235 ite->icid = FIELD_EX64(itel, ITE_L, ICID);
236 ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
237 ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
238 return MEMTX_OK;
242 * Read the Device Table entry at index @devid. On success (including
243 * successfully determining that there is no valid DTE for this index),
244 * we return MEMTX_OK and populate the DTEntry struct accordingly.
245 * If there is an error reading memory then we return the error code.
247 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
249 MemTxResult res = MEMTX_OK;
250 AddressSpace *as = &s->gicv3->dma_as;
251 uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
252 uint64_t dteval;
254 if (entry_addr == -1) {
255 /* No L2 table entry, i.e. no valid DTE, or a memory error */
256 dte->valid = false;
257 return res;
259 dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
260 if (res != MEMTX_OK) {
261 return res;
263 dte->valid = FIELD_EX64(dteval, DTE, VALID);
264 dte->size = FIELD_EX64(dteval, DTE, SIZE);
265 /* DTE word field stores bits [51:8] of the ITT address */
266 dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
267 return MEMTX_OK;
271 * This function handles the processing of following commands based on
272 * the ItsCmdType parameter passed:-
273 * 1. triggering of lpi interrupt translation via ITS INT command
274 * 2. triggering of lpi interrupt translation via gits_translater register
275 * 3. handling of ITS CLEAR command
276 * 4. handling of ITS DISCARD command
278 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
279 uint32_t eventid, ItsCmdType cmd)
281 uint64_t num_eventids;
282 DTEntry dte;
283 CTEntry cte;
284 ITEntry ite;
286 if (devid >= s->dt.num_entries) {
287 qemu_log_mask(LOG_GUEST_ERROR,
288 "%s: invalid command attributes: devid %d>=%d",
289 __func__, devid, s->dt.num_entries);
290 return CMD_CONTINUE;
293 if (get_dte(s, devid, &dte) != MEMTX_OK) {
294 return CMD_STALL;
296 if (!dte.valid) {
297 qemu_log_mask(LOG_GUEST_ERROR,
298 "%s: invalid command attributes: "
299 "invalid dte for %d\n", __func__, devid);
300 return CMD_CONTINUE;
303 num_eventids = 1ULL << (dte.size + 1);
304 if (eventid >= num_eventids) {
305 qemu_log_mask(LOG_GUEST_ERROR,
306 "%s: invalid command attributes: eventid %d >= %"
307 PRId64 "\n",
308 __func__, eventid, num_eventids);
309 return CMD_CONTINUE;
312 if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
313 return CMD_STALL;
316 if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
317 qemu_log_mask(LOG_GUEST_ERROR,
318 "%s: invalid command attributes: invalid ITE\n",
319 __func__);
320 return CMD_CONTINUE;
323 if (ite.icid >= s->ct.num_entries) {
324 qemu_log_mask(LOG_GUEST_ERROR,
325 "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
326 __func__, ite.icid);
327 return CMD_CONTINUE;
330 if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
331 return CMD_STALL;
333 if (!cte.valid) {
334 qemu_log_mask(LOG_GUEST_ERROR,
335 "%s: invalid command attributes: invalid CTE\n",
336 __func__);
337 return CMD_CONTINUE;
341 * Current implementation only supports rdbase == procnum
342 * Hence rdbase physical address is ignored
344 if (cte.rdbase >= s->gicv3->num_cpu) {
345 return CMD_CONTINUE;
348 if ((cmd == CLEAR) || (cmd == DISCARD)) {
349 gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
350 } else {
351 gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
354 if (cmd == DISCARD) {
355 ITEntry ite = {};
356 /* remove mapping from interrupt translation table */
357 ite.valid = false;
358 return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
360 return CMD_CONTINUE;
362 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
363 ItsCmdType cmd)
365 uint32_t devid, eventid;
367 devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
368 eventid = cmdpkt[1] & EVENTID_MASK;
369 return do_process_its_cmd(s, devid, eventid, cmd);
372 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
373 bool ignore_pInt)
375 uint32_t devid, eventid;
376 uint32_t pIntid = 0;
377 uint64_t num_eventids;
378 uint32_t num_intids;
379 uint16_t icid = 0;
380 DTEntry dte;
381 ITEntry ite;
383 devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
384 eventid = cmdpkt[1] & EVENTID_MASK;
386 if (ignore_pInt) {
387 pIntid = eventid;
388 } else {
389 pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
392 icid = cmdpkt[2] & ICID_MASK;
394 if (devid >= s->dt.num_entries) {
395 qemu_log_mask(LOG_GUEST_ERROR,
396 "%s: invalid command attributes: devid %d>=%d",
397 __func__, devid, s->dt.num_entries);
398 return CMD_CONTINUE;
401 if (get_dte(s, devid, &dte) != MEMTX_OK) {
402 return CMD_STALL;
404 num_eventids = 1ULL << (dte.size + 1);
405 num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
407 if (icid >= s->ct.num_entries) {
408 qemu_log_mask(LOG_GUEST_ERROR,
409 "%s: invalid ICID 0x%x >= 0x%x\n",
410 __func__, icid, s->ct.num_entries);
411 return CMD_CONTINUE;
414 if (!dte.valid) {
415 qemu_log_mask(LOG_GUEST_ERROR,
416 "%s: no valid DTE for devid 0x%x\n", __func__, devid);
417 return CMD_CONTINUE;
420 if (eventid >= num_eventids) {
421 qemu_log_mask(LOG_GUEST_ERROR,
422 "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
423 __func__, eventid, num_eventids);
424 return CMD_CONTINUE;
427 if (pIntid < GICV3_LPI_INTID_START || pIntid >= num_intids) {
428 qemu_log_mask(LOG_GUEST_ERROR,
429 "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
430 return CMD_CONTINUE;
433 /* add ite entry to interrupt translation table */
434 ite.valid = true;
435 ite.inttype = ITE_INTTYPE_PHYSICAL;
436 ite.intid = pIntid;
437 ite.icid = icid;
438 ite.doorbell = INTID_SPURIOUS;
439 ite.vpeid = 0;
440 return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
444 * Update the Collection Table entry for @icid to @cte. Returns true
445 * on success, false if there was a memory access error.
447 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
449 AddressSpace *as = &s->gicv3->dma_as;
450 uint64_t entry_addr;
451 uint64_t cteval = 0;
452 MemTxResult res = MEMTX_OK;
454 if (cte->valid) {
455 /* add mapping entry to collection table */
456 cteval = FIELD_DP64(cteval, CTE, VALID, 1);
457 cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
460 entry_addr = table_entry_addr(s, &s->ct, icid, &res);
461 if (res != MEMTX_OK) {
462 /* memory access error: stall */
463 return false;
465 if (entry_addr == -1) {
466 /* No L2 table for this index: discard write and continue */
467 return true;
470 address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
471 return res == MEMTX_OK;
474 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
476 uint16_t icid;
477 CTEntry cte;
479 icid = cmdpkt[2] & ICID_MASK;
480 cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
481 if (cte.valid) {
482 cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
483 cte.rdbase &= RDBASE_PROCNUM_MASK;
484 } else {
485 cte.rdbase = 0;
488 if (icid >= s->ct.num_entries) {
489 qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%d", icid);
490 return CMD_CONTINUE;
492 if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
493 qemu_log_mask(LOG_GUEST_ERROR,
494 "ITS MAPC: invalid RDBASE %u ", cte.rdbase);
495 return CMD_CONTINUE;
498 return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
502 * Update the Device Table entry for @devid to @dte. Returns true
503 * on success, false if there was a memory access error.
505 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
507 AddressSpace *as = &s->gicv3->dma_as;
508 uint64_t entry_addr;
509 uint64_t dteval = 0;
510 MemTxResult res = MEMTX_OK;
512 if (dte->valid) {
513 /* add mapping entry to device table */
514 dteval = FIELD_DP64(dteval, DTE, VALID, 1);
515 dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
516 dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
519 entry_addr = table_entry_addr(s, &s->dt, devid, &res);
520 if (res != MEMTX_OK) {
521 /* memory access error: stall */
522 return false;
524 if (entry_addr == -1) {
525 /* No L2 table for this index: discard write and continue */
526 return true;
528 address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
529 return res == MEMTX_OK;
532 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
534 uint32_t devid;
535 DTEntry dte;
537 devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
538 dte.size = cmdpkt[1] & SIZE_MASK;
539 dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
540 dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
542 if (devid >= s->dt.num_entries) {
543 qemu_log_mask(LOG_GUEST_ERROR,
544 "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
545 devid, s->dt.num_entries);
546 return CMD_CONTINUE;
549 if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
550 qemu_log_mask(LOG_GUEST_ERROR,
551 "ITS MAPD: invalid size %d\n", dte.size);
552 return CMD_CONTINUE;
555 return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
558 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
560 uint64_t rd1, rd2;
562 rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
563 rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
565 if (rd1 >= s->gicv3->num_cpu) {
566 qemu_log_mask(LOG_GUEST_ERROR,
567 "%s: RDBASE1 %" PRId64
568 " out of range (must be less than %d)\n",
569 __func__, rd1, s->gicv3->num_cpu);
570 return CMD_CONTINUE;
572 if (rd2 >= s->gicv3->num_cpu) {
573 qemu_log_mask(LOG_GUEST_ERROR,
574 "%s: RDBASE2 %" PRId64
575 " out of range (must be less than %d)\n",
576 __func__, rd2, s->gicv3->num_cpu);
577 return CMD_CONTINUE;
580 if (rd1 == rd2) {
581 /* Move to same target must succeed as a no-op */
582 return CMD_CONTINUE;
585 /* Move all pending LPIs from redistributor 1 to redistributor 2 */
586 gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
588 return CMD_CONTINUE;
591 static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
593 uint32_t devid, eventid;
594 uint16_t new_icid;
595 uint64_t num_eventids;
596 DTEntry dte;
597 CTEntry old_cte, new_cte;
598 ITEntry old_ite;
600 devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
601 eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
602 new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);
604 if (devid >= s->dt.num_entries) {
605 qemu_log_mask(LOG_GUEST_ERROR,
606 "%s: invalid command attributes: devid %d>=%d",
607 __func__, devid, s->dt.num_entries);
608 return CMD_CONTINUE;
610 if (get_dte(s, devid, &dte) != MEMTX_OK) {
611 return CMD_STALL;
614 if (!dte.valid) {
615 qemu_log_mask(LOG_GUEST_ERROR,
616 "%s: invalid command attributes: "
617 "invalid dte for %d\n", __func__, devid);
618 return CMD_CONTINUE;
621 num_eventids = 1ULL << (dte.size + 1);
622 if (eventid >= num_eventids) {
623 qemu_log_mask(LOG_GUEST_ERROR,
624 "%s: invalid command attributes: eventid %d >= %"
625 PRId64 "\n",
626 __func__, eventid, num_eventids);
627 return CMD_CONTINUE;
630 if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
631 return CMD_STALL;
634 if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
635 qemu_log_mask(LOG_GUEST_ERROR,
636 "%s: invalid command attributes: invalid ITE\n",
637 __func__);
638 return CMD_CONTINUE;
641 if (old_ite.icid >= s->ct.num_entries) {
642 qemu_log_mask(LOG_GUEST_ERROR,
643 "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
644 __func__, old_ite.icid);
645 return CMD_CONTINUE;
648 if (new_icid >= s->ct.num_entries) {
649 qemu_log_mask(LOG_GUEST_ERROR,
650 "%s: invalid command attributes: ICID 0x%x\n",
651 __func__, new_icid);
652 return CMD_CONTINUE;
655 if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
656 return CMD_STALL;
658 if (!old_cte.valid) {
659 qemu_log_mask(LOG_GUEST_ERROR,
660 "%s: invalid command attributes: "
661 "invalid CTE for old ICID 0x%x\n",
662 __func__, old_ite.icid);
663 return CMD_CONTINUE;
666 if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
667 return CMD_STALL;
669 if (!new_cte.valid) {
670 qemu_log_mask(LOG_GUEST_ERROR,
671 "%s: invalid command attributes: "
672 "invalid CTE for new ICID 0x%x\n",
673 __func__, new_icid);
674 return CMD_CONTINUE;
677 if (old_cte.rdbase >= s->gicv3->num_cpu) {
678 qemu_log_mask(LOG_GUEST_ERROR,
679 "%s: CTE has invalid rdbase 0x%x\n",
680 __func__, old_cte.rdbase);
681 return CMD_CONTINUE;
684 if (new_cte.rdbase >= s->gicv3->num_cpu) {
685 qemu_log_mask(LOG_GUEST_ERROR,
686 "%s: CTE has invalid rdbase 0x%x\n",
687 __func__, new_cte.rdbase);
688 return CMD_CONTINUE;
691 if (old_cte.rdbase != new_cte.rdbase) {
692 /* Move the LPI from the old redistributor to the new one */
693 gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
694 &s->gicv3->cpu[new_cte.rdbase],
695 old_ite.intid);
698 /* Update the ICID field in the interrupt translation table entry */
699 old_ite.icid = new_icid;
700 return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
704 * Current implementation blocks until all
705 * commands are processed
707 static void process_cmdq(GICv3ITSState *s)
709 uint32_t wr_offset = 0;
710 uint32_t rd_offset = 0;
711 uint32_t cq_offset = 0;
712 AddressSpace *as = &s->gicv3->dma_as;
713 uint8_t cmd;
714 int i;
716 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
717 return;
720 wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
722 if (wr_offset >= s->cq.num_entries) {
723 qemu_log_mask(LOG_GUEST_ERROR,
724 "%s: invalid write offset "
725 "%d\n", __func__, wr_offset);
726 return;
729 rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
731 if (rd_offset >= s->cq.num_entries) {
732 qemu_log_mask(LOG_GUEST_ERROR,
733 "%s: invalid read offset "
734 "%d\n", __func__, rd_offset);
735 return;
738 while (wr_offset != rd_offset) {
739 ItsCmdResult result = CMD_CONTINUE;
740 void *hostmem;
741 hwaddr buflen;
742 uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
744 cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
746 buflen = GITS_CMDQ_ENTRY_SIZE;
747 hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
748 &buflen, false, MEMTXATTRS_UNSPECIFIED);
749 if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
750 if (hostmem) {
751 address_space_unmap(as, hostmem, buflen, false, 0);
753 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
754 qemu_log_mask(LOG_GUEST_ERROR,
755 "%s: could not read command at 0x%" PRIx64 "\n",
756 __func__, s->cq.base_addr + cq_offset);
757 break;
759 for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
760 cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
762 address_space_unmap(as, hostmem, buflen, false, 0);
764 cmd = cmdpkt[0] & CMD_MASK;
766 trace_gicv3_its_process_command(rd_offset, cmd);
768 switch (cmd) {
769 case GITS_CMD_INT:
770 result = process_its_cmd(s, cmdpkt, INTERRUPT);
771 break;
772 case GITS_CMD_CLEAR:
773 result = process_its_cmd(s, cmdpkt, CLEAR);
774 break;
775 case GITS_CMD_SYNC:
777 * Current implementation makes a blocking synchronous call
778 * for every command issued earlier, hence the internal state
779 * is already consistent by the time SYNC command is executed.
780 * Hence no further processing is required for SYNC command.
782 break;
783 case GITS_CMD_MAPD:
784 result = process_mapd(s, cmdpkt);
785 break;
786 case GITS_CMD_MAPC:
787 result = process_mapc(s, cmdpkt);
788 break;
789 case GITS_CMD_MAPTI:
790 result = process_mapti(s, cmdpkt, false);
791 break;
792 case GITS_CMD_MAPI:
793 result = process_mapti(s, cmdpkt, true);
794 break;
795 case GITS_CMD_DISCARD:
796 result = process_its_cmd(s, cmdpkt, DISCARD);
797 break;
798 case GITS_CMD_INV:
799 case GITS_CMD_INVALL:
801 * Current implementation doesn't cache any ITS tables,
802 * but the calculated lpi priority information. We only
803 * need to trigger lpi priority re-calculation to be in
804 * sync with LPI config table or pending table changes.
806 for (i = 0; i < s->gicv3->num_cpu; i++) {
807 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
809 break;
810 case GITS_CMD_MOVI:
811 result = process_movi(s, cmdpkt);
812 break;
813 case GITS_CMD_MOVALL:
814 result = process_movall(s, cmdpkt);
815 break;
816 default:
817 break;
819 if (result == CMD_CONTINUE) {
820 rd_offset++;
821 rd_offset %= s->cq.num_entries;
822 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
823 } else {
824 /* CMD_STALL */
825 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
826 qemu_log_mask(LOG_GUEST_ERROR,
827 "%s: 0x%x cmd processing failed, stalling\n",
828 __func__, cmd);
829 break;
835 * This function extracts the ITS Device and Collection table specific
836 * parameters (like base_addr, size etc) from GITS_BASER register.
837 * It is called during ITS enable and also during post_load migration
839 static void extract_table_params(GICv3ITSState *s)
841 uint16_t num_pages = 0;
842 uint8_t page_sz_type;
843 uint8_t type;
844 uint32_t page_sz = 0;
845 uint64_t value;
847 for (int i = 0; i < 8; i++) {
848 TableDesc *td;
849 int idbits;
851 value = s->baser[i];
853 if (!value) {
854 continue;
857 page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
859 switch (page_sz_type) {
860 case 0:
861 page_sz = GITS_PAGE_SIZE_4K;
862 break;
864 case 1:
865 page_sz = GITS_PAGE_SIZE_16K;
866 break;
868 case 2:
869 case 3:
870 page_sz = GITS_PAGE_SIZE_64K;
871 break;
873 default:
874 g_assert_not_reached();
877 num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
879 type = FIELD_EX64(value, GITS_BASER, TYPE);
881 switch (type) {
882 case GITS_BASER_TYPE_DEVICE:
883 td = &s->dt;
884 idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
885 break;
886 case GITS_BASER_TYPE_COLLECTION:
887 td = &s->ct;
888 if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
889 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
890 } else {
891 /* 16-bit CollectionId supported when CIL == 0 */
892 idbits = 16;
894 break;
895 default:
897 * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
898 * ensures we will only see type values corresponding to
899 * the values set up in gicv3_its_reset().
901 g_assert_not_reached();
904 memset(td, 0, sizeof(*td));
906 * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
907 * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
908 * do not have a special case where the GITS_BASER<n>.Valid bit is 0
909 * for the register corresponding to the Collection table but we
910 * still have to process interrupts using non-memory-backed
911 * Collection table entries.)
912 * The specification makes it UNPREDICTABLE to enable the ITS without
913 * marking each BASER<n> as valid. We choose to handle these as if
914 * the table was zero-sized, so commands using the table will fail
915 * and interrupts requested via GITS_TRANSLATER writes will be ignored.
916 * This happens automatically by leaving the num_entries field at
917 * zero, which will be caught by the bounds checks we have before
918 * every table lookup anyway.
920 if (!FIELD_EX64(value, GITS_BASER, VALID)) {
921 continue;
923 td->page_sz = page_sz;
924 td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
925 td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
926 td->base_addr = baser_base_addr(value, page_sz);
927 if (!td->indirect) {
928 td->num_entries = (num_pages * page_sz) / td->entry_sz;
929 } else {
930 td->num_entries = (((num_pages * page_sz) /
931 L1TABLE_ENTRY_SIZE) *
932 (page_sz / td->entry_sz));
934 td->num_entries = MIN(td->num_entries, 1ULL << idbits);
938 static void extract_cmdq_params(GICv3ITSState *s)
940 uint16_t num_pages = 0;
941 uint64_t value = s->cbaser;
943 num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
945 memset(&s->cq, 0 , sizeof(s->cq));
947 if (FIELD_EX64(value, GITS_CBASER, VALID)) {
948 s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
949 GITS_CMDQ_ENTRY_SIZE;
950 s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
951 s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
955 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
956 uint64_t *data, unsigned size,
957 MemTxAttrs attrs)
960 * GITS_TRANSLATER is write-only, and all other addresses
961 * in the interrupt translation space frame are RES0.
963 *data = 0;
964 return MEMTX_OK;
967 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
968 uint64_t data, unsigned size,
969 MemTxAttrs attrs)
971 GICv3ITSState *s = (GICv3ITSState *)opaque;
972 bool result = true;
974 trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
976 switch (offset) {
977 case GITS_TRANSLATER:
978 if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
979 result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
981 break;
982 default:
983 break;
986 if (result) {
987 return MEMTX_OK;
988 } else {
989 return MEMTX_ERROR;
993 static bool its_writel(GICv3ITSState *s, hwaddr offset,
994 uint64_t value, MemTxAttrs attrs)
996 bool result = true;
997 int index;
999 switch (offset) {
1000 case GITS_CTLR:
1001 if (value & R_GITS_CTLR_ENABLED_MASK) {
1002 s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
1003 extract_table_params(s);
1004 extract_cmdq_params(s);
1005 process_cmdq(s);
1006 } else {
1007 s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
1009 break;
1010 case GITS_CBASER:
1012 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1013 * already enabled
1015 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1016 s->cbaser = deposit64(s->cbaser, 0, 32, value);
1017 s->creadr = 0;
1019 break;
1020 case GITS_CBASER + 4:
1022 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1023 * already enabled
1025 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1026 s->cbaser = deposit64(s->cbaser, 32, 32, value);
1027 s->creadr = 0;
1029 break;
1030 case GITS_CWRITER:
1031 s->cwriter = deposit64(s->cwriter, 0, 32,
1032 (value & ~R_GITS_CWRITER_RETRY_MASK));
1033 if (s->cwriter != s->creadr) {
1034 process_cmdq(s);
1036 break;
1037 case GITS_CWRITER + 4:
1038 s->cwriter = deposit64(s->cwriter, 32, 32, value);
1039 break;
1040 case GITS_CREADR:
1041 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1042 s->creadr = deposit64(s->creadr, 0, 32,
1043 (value & ~R_GITS_CREADR_STALLED_MASK));
1044 } else {
1045 /* RO register, ignore the write */
1046 qemu_log_mask(LOG_GUEST_ERROR,
1047 "%s: invalid guest write to RO register at offset "
1048 TARGET_FMT_plx "\n", __func__, offset);
1050 break;
1051 case GITS_CREADR + 4:
1052 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1053 s->creadr = deposit64(s->creadr, 32, 32, value);
1054 } else {
1055 /* RO register, ignore the write */
1056 qemu_log_mask(LOG_GUEST_ERROR,
1057 "%s: invalid guest write to RO register at offset "
1058 TARGET_FMT_plx "\n", __func__, offset);
1060 break;
1061 case GITS_BASER ... GITS_BASER + 0x3f:
1063 * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1064 * already enabled
1066 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1067 index = (offset - GITS_BASER) / 8;
1069 if (s->baser[index] == 0) {
1070 /* Unimplemented GITS_BASERn: RAZ/WI */
1071 break;
1073 if (offset & 7) {
1074 value <<= 32;
1075 value &= ~GITS_BASER_RO_MASK;
1076 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
1077 s->baser[index] |= value;
1078 } else {
1079 value &= ~GITS_BASER_RO_MASK;
1080 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
1081 s->baser[index] |= value;
1084 break;
1085 case GITS_IIDR:
1086 case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1087 /* RO registers, ignore the write */
1088 qemu_log_mask(LOG_GUEST_ERROR,
1089 "%s: invalid guest write to RO register at offset "
1090 TARGET_FMT_plx "\n", __func__, offset);
1091 break;
1092 default:
1093 result = false;
1094 break;
1096 return result;
1099 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1100 uint64_t *data, MemTxAttrs attrs)
1102 bool result = true;
1103 int index;
1105 switch (offset) {
1106 case GITS_CTLR:
1107 *data = s->ctlr;
1108 break;
1109 case GITS_IIDR:
1110 *data = gicv3_iidr();
1111 break;
1112 case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1113 /* ID registers */
1114 *data = gicv3_idreg(offset - GITS_IDREGS);
1115 break;
1116 case GITS_TYPER:
1117 *data = extract64(s->typer, 0, 32);
1118 break;
1119 case GITS_TYPER + 4:
1120 *data = extract64(s->typer, 32, 32);
1121 break;
1122 case GITS_CBASER:
1123 *data = extract64(s->cbaser, 0, 32);
1124 break;
1125 case GITS_CBASER + 4:
1126 *data = extract64(s->cbaser, 32, 32);
1127 break;
1128 case GITS_CREADR:
1129 *data = extract64(s->creadr, 0, 32);
1130 break;
1131 case GITS_CREADR + 4:
1132 *data = extract64(s->creadr, 32, 32);
1133 break;
1134 case GITS_CWRITER:
1135 *data = extract64(s->cwriter, 0, 32);
1136 break;
1137 case GITS_CWRITER + 4:
1138 *data = extract64(s->cwriter, 32, 32);
1139 break;
1140 case GITS_BASER ... GITS_BASER + 0x3f:
1141 index = (offset - GITS_BASER) / 8;
1142 if (offset & 7) {
1143 *data = extract64(s->baser[index], 32, 32);
1144 } else {
1145 *data = extract64(s->baser[index], 0, 32);
1147 break;
1148 default:
1149 result = false;
1150 break;
1152 return result;
1155 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1156 uint64_t value, MemTxAttrs attrs)
1158 bool result = true;
1159 int index;
1161 switch (offset) {
1162 case GITS_BASER ... GITS_BASER + 0x3f:
1164 * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1165 * already enabled
1167 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1168 index = (offset - GITS_BASER) / 8;
1169 if (s->baser[index] == 0) {
1170 /* Unimplemented GITS_BASERn: RAZ/WI */
1171 break;
1173 s->baser[index] &= GITS_BASER_RO_MASK;
1174 s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1176 break;
1177 case GITS_CBASER:
1179 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1180 * already enabled
1182 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1183 s->cbaser = value;
1184 s->creadr = 0;
1186 break;
1187 case GITS_CWRITER:
1188 s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1189 if (s->cwriter != s->creadr) {
1190 process_cmdq(s);
1192 break;
1193 case GITS_CREADR:
1194 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1195 s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1196 } else {
1197 /* RO register, ignore the write */
1198 qemu_log_mask(LOG_GUEST_ERROR,
1199 "%s: invalid guest write to RO register at offset "
1200 TARGET_FMT_plx "\n", __func__, offset);
1202 break;
1203 case GITS_TYPER:
1204 /* RO registers, ignore the write */
1205 qemu_log_mask(LOG_GUEST_ERROR,
1206 "%s: invalid guest write to RO register at offset "
1207 TARGET_FMT_plx "\n", __func__, offset);
1208 break;
1209 default:
1210 result = false;
1211 break;
1213 return result;
1216 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1217 uint64_t *data, MemTxAttrs attrs)
1219 bool result = true;
1220 int index;
1222 switch (offset) {
1223 case GITS_TYPER:
1224 *data = s->typer;
1225 break;
1226 case GITS_BASER ... GITS_BASER + 0x3f:
1227 index = (offset - GITS_BASER) / 8;
1228 *data = s->baser[index];
1229 break;
1230 case GITS_CBASER:
1231 *data = s->cbaser;
1232 break;
1233 case GITS_CREADR:
1234 *data = s->creadr;
1235 break;
1236 case GITS_CWRITER:
1237 *data = s->cwriter;
1238 break;
1239 default:
1240 result = false;
1241 break;
1243 return result;
1246 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1247 unsigned size, MemTxAttrs attrs)
1249 GICv3ITSState *s = (GICv3ITSState *)opaque;
1250 bool result;
1252 switch (size) {
1253 case 4:
1254 result = its_readl(s, offset, data, attrs);
1255 break;
1256 case 8:
1257 result = its_readll(s, offset, data, attrs);
1258 break;
1259 default:
1260 result = false;
1261 break;
1264 if (!result) {
1265 qemu_log_mask(LOG_GUEST_ERROR,
1266 "%s: invalid guest read at offset " TARGET_FMT_plx
1267 "size %u\n", __func__, offset, size);
1268 trace_gicv3_its_badread(offset, size);
1270 * The spec requires that reserved registers are RAZ/WI;
1271 * so use false returns from leaf functions as a way to
1272 * trigger the guest-error logging but don't return it to
1273 * the caller, or we'll cause a spurious guest data abort.
1275 *data = 0;
1276 } else {
1277 trace_gicv3_its_read(offset, *data, size);
1279 return MEMTX_OK;
1282 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1283 unsigned size, MemTxAttrs attrs)
1285 GICv3ITSState *s = (GICv3ITSState *)opaque;
1286 bool result;
1288 switch (size) {
1289 case 4:
1290 result = its_writel(s, offset, data, attrs);
1291 break;
1292 case 8:
1293 result = its_writell(s, offset, data, attrs);
1294 break;
1295 default:
1296 result = false;
1297 break;
1300 if (!result) {
1301 qemu_log_mask(LOG_GUEST_ERROR,
1302 "%s: invalid guest write at offset " TARGET_FMT_plx
1303 "size %u\n", __func__, offset, size);
1304 trace_gicv3_its_badwrite(offset, data, size);
1306 * The spec requires that reserved registers are RAZ/WI;
1307 * so use false returns from leaf functions as a way to
1308 * trigger the guest-error logging but don't return it to
1309 * the caller, or we'll cause a spurious guest data abort.
1311 } else {
1312 trace_gicv3_its_write(offset, data, size);
1314 return MEMTX_OK;
/*
 * MMIO ops for the ITS control register frame (GITS_CTLR, GITS_BASERn,
 * command queue registers, ...). Only 32-bit and 64-bit accesses are
 * valid, and the implementation handles exactly those sizes.
 */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/*
 * MMIO ops for the ITS translation register frame (the frame containing
 * GITS_TRANSLATER). 16-bit and 32-bit accesses are accepted here,
 * unlike the control frame.
 */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1337 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1339 GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1340 int i;
1342 for (i = 0; i < s->gicv3->num_cpu; i++) {
1343 if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1344 error_setg(errp, "Physical LPI not supported by CPU %d", i);
1345 return;
1349 gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1351 /* set the ITS default features supported */
1352 s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1353 s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1354 ITS_ITT_ENTRY_SIZE - 1);
1355 s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1356 s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1357 s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1358 s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
/*
 * Device reset: chain to the parent class reset, then apply the reset
 * values of the registers this subclass implements.
 */
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}
1393 static void gicv3_its_post_load(GICv3ITSState *s)
1395 if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1396 extract_table_params(s);
1397 extract_cmdq_params(s);
/*
 * QOM properties: the creator must set "parent-gicv3" to link this ITS
 * to the GICv3 device it belongs to.
 */
static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Class init: wire up realize, properties, the reset chain (saving the
 * parent's reset handler so gicv3_its_reset can call it first), and the
 * common-class migration post-load hook.
 */
static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}
/* QOM type description for the emulated (TCG) GICv3 ITS */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
/* Register the ITS type with QOM at module-init time */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)