hvf: Make hvf_get_segments() / hvf_put_segments() local
[qemu/ar7.git] / hw / intc / arm_gicv3_its.c
blobb96b874afdf8d8f98cbadc2e65497b334656cb70
1 /*
2 * ITS emulation for a GICv3-based system
4 * Copyright Linaro.org 2021
6 * Authors:
7 * Shashi Mallela <shashi.mallela@linaro.org>
9 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10 * option) any later version. See the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
/* Forward declaration so the QOM checker macro below can reference it */
typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    /* Saved parent reset handler so a subclass reset can chain to it */
    void (*parent_reset)(DeviceState *dev);
};
/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;
/* Unpacked form of a Device Table entry, as read by get_dte() */
typedef struct DTEntry {
    bool valid;
    unsigned size;    /* number of EventID bits minus one */
    uint64_t ittaddr; /* guest physical address of the device's ITT */
} DTEntry;

/* Unpacked form of a Collection Table entry, as read by get_cte() */
typedef struct CTEntry {
    bool valid;
    uint32_t rdbase;  /* target redistributor, used as a CPU index here */
} CTEntry;

/* Unpacked form of an Interrupt Translation Table entry, as read by get_ite() */
typedef struct ITEntry {
    bool valid;
    int inttype;      /* ITE_INTTYPE_* value */
    uint32_t intid;
    uint32_t doorbell;
    uint32_t icid;
    uint32_t vpeid;
} ITEntry;
/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall or keep going.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,
    CMD_CONTINUE = 1,
} ItsCmdResult;
82 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
84 uint64_t result = 0;
86 switch (page_sz) {
87 case GITS_PAGE_SIZE_4K:
88 case GITS_PAGE_SIZE_16K:
89 result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
90 break;
92 case GITS_PAGE_SIZE_64K:
93 result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
94 result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
95 break;
97 default:
98 break;
100 return result;
static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
                                 uint32_t idx, MemTxResult *res)
{
    /*
     * Given a TableDesc describing one of the ITS in-guest-memory
     * tables and an index into it, return the guest address
     * corresponding to that table entry.
     * If there was a memory error reading the L1 table of an
     * indirect table, *res is set accordingly, and we return -1.
     * If the L1 table entry is marked not valid, we return -1 with
     * *res set to MEMTX_OK.
     *
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t l2idx;
    uint64_t l2;
    uint32_t num_l2_entries;

    *res = MEMTX_OK;

    if (!td->indirect) {
        /* Single level table */
        return td->base_addr + idx * td->entry_sz;
    }

    /* Two level table: each L1 entry covers one L2 page of entries */
    l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);

    l2 = address_space_ldq_le(as,
                              td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
                              MEMTXATTRS_UNSPECIFIED, res);
    if (*res != MEMTX_OK) {
        return -1;
    }
    if (!(l2 & L2_TABLE_VALID_MASK)) {
        return -1;
    }

    num_l2_entries = td->page_sz / td->entry_sz;
    /* Low 51 bits of the L1 entry hold the L2 page base address */
    return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}
/*
 * Read the Collection Table entry at index @icid. On success (including
 * successfully determining that there is no valid CTE for this index),
 * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    uint64_t cteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid CTE, or a memory error */
        cte->valid = false;
        goto out;
    }

    cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    cte->valid = FIELD_EX64(cteval, CTE, VALID);
    cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_cte_read_fault(icid);
    } else {
        trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
    }
    return res;
}
/*
 * Update the Interrupt Table entry at index @eventid in the table specified
 * by the DTE @dte. Returns true on success, false if there was a memory
 * access error.
 */
static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
                       const ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
    uint64_t itel = 0;
    uint32_t iteh = 0;

    trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
                              ite->inttype, ite->intid, ite->icid,
                              ite->vpeid, ite->doorbell);

    /* An invalid ITE is written out as all-zeroes */
    if (ite->valid) {
        itel = FIELD_DP64(itel, ITE_L, VALID, 1);
        itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
        itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
        itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
        itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
        iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
    }

    /* The ITE is stored as a 64-bit low word followed by a 32-bit high word */
    address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        return false;
    }
    address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
/*
 * Read the Interrupt Table entry at index @eventid from the table specified
 * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
 * struct @ite accordingly. If there is an error reading memory then we return
 * the error code.
 */
static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
                           const DTEntry *dte, ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t itel;
    uint32_t iteh;
    hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;

    /* 64-bit low word of the entry */
    itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    /* 32-bit high word of the entry */
    iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    ite->valid = FIELD_EX64(itel, ITE_L, VALID);
    ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
    ite->intid = FIELD_EX64(itel, ITE_L, INTID);
    ite->icid = FIELD_EX64(itel, ITE_L, ICID);
    ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
    ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
    trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
                             ite->inttype, ite->intid, ite->icid,
                             ite->vpeid, ite->doorbell);
    return MEMTX_OK;
}
/*
 * Read the Device Table entry at index @devid. On success (including
 * successfully determining that there is no valid DTE for this index),
 * we return MEMTX_OK and populate the DTEntry struct accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
{
    MemTxResult res = MEMTX_OK;
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    uint64_t dteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid DTE, or a memory error */
        dte->valid = false;
        goto out;
    }
    dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    dte->valid = FIELD_EX64(dteval, DTE, VALID);
    dte->size = FIELD_EX64(dteval, DTE, SIZE);
    /* DTE word field stores bits [51:8] of the ITT address */
    dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_dte_read_fault(devid);
    } else {
        trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
    }
    return res;
}
/*
 * This function handles the processing of following commands based on
 * the ItsCmdType parameter passed:-
 * 1. triggering of lpi interrupt translation via ITS INT command
 * 2. triggering of lpi interrupt translation via gits_translater register
 * 3. handling of ITS CLEAR command
 * 4. handling of ITS DISCARD command
 */
static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
                                       uint32_t eventid, ItsCmdType cmd)
{
    uint64_t num_eventids;
    DTEntry dte;
    CTEntry cte;
    ITEntry ite;

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", __func__, devid);
        return CMD_CONTINUE;
    }

    /* DTE.SIZE is (number of EventID bits - 1) */
    num_eventids = 1ULL << (dte.size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    if (ite.icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
                      __func__, ite.icid);
        return CMD_CONTINUE;
    }

    if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid CTE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    /*
     * Current implementation only supports rdbase == procnum
     * Hence rdbase physical address is ignored
     */
    if (cte.rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }

    /* CLEAR/DISCARD lower the LPI; INT and TRANSLATER writes raise it */
    if ((cmd == CLEAR) || (cmd == DISCARD)) {
        gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
    } else {
        gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
    }

    if (cmd == DISCARD) {
        ITEntry ite = {};
        /* remove mapping from interrupt translation table */
        ite.valid = false;
        return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
    }
    return CMD_CONTINUE;
}
383 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
384 ItsCmdType cmd)
386 uint32_t devid, eventid;
388 devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
389 eventid = cmdpkt[1] & EVENTID_MASK;
390 switch (cmd) {
391 case INTERRUPT:
392 trace_gicv3_its_cmd_int(devid, eventid);
393 break;
394 case CLEAR:
395 trace_gicv3_its_cmd_clear(devid, eventid);
396 break;
397 case DISCARD:
398 trace_gicv3_its_cmd_discard(devid, eventid);
399 break;
400 default:
401 g_assert_not_reached();
403 return do_process_its_cmd(s, devid, eventid, cmd);
/*
 * Handle the MAPTI and MAPI commands: map (DeviceID, EventID) to a
 * physical interrupt ID and collection. For MAPI (@ignore_pInt true)
 * the physical interrupt ID is the EventID itself.
 */
static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
                                  bool ignore_pInt)
{
    uint32_t devid, eventid;
    uint32_t pIntid = 0;
    uint64_t num_eventids;
    uint32_t num_intids;
    uint16_t icid = 0;
    DTEntry dte;
    ITEntry ite;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    icid = cmdpkt[2] & ICID_MASK;

    if (ignore_pInt) {
        pIntid = eventid;
        trace_gicv3_its_cmd_mapi(devid, eventid, icid);
    } else {
        pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
        trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
    }

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    num_eventids = 1ULL << (dte.size + 1);
    num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x >= 0x%x\n",
                      __func__, icid, s->ct.num_entries);
        return CMD_CONTINUE;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: no valid DTE for devid 0x%x\n", __func__, devid);
        return CMD_CONTINUE;
    }

    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    /* The interrupt must lie in the LPI range */
    if (pIntid < GICV3_LPI_INTID_START || pIntid >= num_intids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
        return CMD_CONTINUE;
    }

    /* add ite entry to interrupt translation table */
    ite.valid = true;
    ite.inttype = ITE_INTTYPE_PHYSICAL;
    ite.intid = pIntid;
    ite.icid = icid;
    ite.doorbell = INTID_SPURIOUS;
    ite.vpeid = 0;
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
}
/*
 * Update the Collection Table entry for @icid to @cte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t cteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);

    if (cte->valid) {
        /* add mapping entry to collection table */
        cteval = FIELD_DP64(cteval, CTE, VALID, 1);
        cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
    }

    entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }

    address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
511 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
513 uint16_t icid;
514 CTEntry cte;
516 icid = cmdpkt[2] & ICID_MASK;
517 cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
518 if (cte.valid) {
519 cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
520 cte.rdbase &= RDBASE_PROCNUM_MASK;
521 } else {
522 cte.rdbase = 0;
524 trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);
526 if (icid >= s->ct.num_entries) {
527 qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%d", icid);
528 return CMD_CONTINUE;
530 if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
531 qemu_log_mask(LOG_GUEST_ERROR,
532 "ITS MAPC: invalid RDBASE %u ", cte.rdbase);
533 return CMD_CONTINUE;
536 return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
/*
 * Update the Device Table entry for @devid to @dte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t dteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);

    if (dte->valid) {
        /* add mapping entry to device table */
        dteval = FIELD_DP64(dteval, DTE, VALID, 1);
        dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
        dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
    }

    entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
572 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
574 uint32_t devid;
575 DTEntry dte;
577 devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
578 dte.size = cmdpkt[1] & SIZE_MASK;
579 dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
580 dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
582 trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);
584 if (devid >= s->dt.num_entries) {
585 qemu_log_mask(LOG_GUEST_ERROR,
586 "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
587 devid, s->dt.num_entries);
588 return CMD_CONTINUE;
591 if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
592 qemu_log_mask(LOG_GUEST_ERROR,
593 "ITS MAPD: invalid size %d\n", dte.size);
594 return CMD_CONTINUE;
597 return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
600 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
602 uint64_t rd1, rd2;
604 rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
605 rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
607 trace_gicv3_its_cmd_movall(rd1, rd2);
609 if (rd1 >= s->gicv3->num_cpu) {
610 qemu_log_mask(LOG_GUEST_ERROR,
611 "%s: RDBASE1 %" PRId64
612 " out of range (must be less than %d)\n",
613 __func__, rd1, s->gicv3->num_cpu);
614 return CMD_CONTINUE;
616 if (rd2 >= s->gicv3->num_cpu) {
617 qemu_log_mask(LOG_GUEST_ERROR,
618 "%s: RDBASE2 %" PRId64
619 " out of range (must be less than %d)\n",
620 __func__, rd2, s->gicv3->num_cpu);
621 return CMD_CONTINUE;
624 if (rd1 == rd2) {
625 /* Move to same target must succeed as a no-op */
626 return CMD_CONTINUE;
629 /* Move all pending LPIs from redistributor 1 to redistributor 2 */
630 gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
632 return CMD_CONTINUE;
/*
 * Handle the MOVI command: retarget the LPI for (DeviceID, EventID)
 * from its current collection to @new_icid, moving any pending state
 * between the two redistributors if they differ.
 */
static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    uint16_t new_icid;
    uint64_t num_eventids;
    DTEntry dte;
    CTEntry old_cte, new_cte;
    ITEntry old_ite;

    devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
    new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);

    trace_gicv3_its_cmd_movi(devid, eventid, new_icid);

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }
    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", __func__, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (dte.size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    if (old_ite.icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
                      __func__, old_ite.icid);
        return CMD_CONTINUE;
    }

    if (new_icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: ICID 0x%x\n",
                      __func__, new_icid);
        return CMD_CONTINUE;
    }

    /* Both the old and the new collection must be valid */
    if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!old_cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid CTE for old ICID 0x%x\n",
                      __func__, old_ite.icid);
        return CMD_CONTINUE;
    }

    if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!new_cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid CTE for new ICID 0x%x\n",
                      __func__, new_icid);
        return CMD_CONTINUE;
    }

    if (old_cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: CTE has invalid rdbase 0x%x\n",
                      __func__, old_cte.rdbase);
        return CMD_CONTINUE;
    }

    if (new_cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: CTE has invalid rdbase 0x%x\n",
                      __func__, new_cte.rdbase);
        return CMD_CONTINUE;
    }

    if (old_cte.rdbase != new_cte.rdbase) {
        /* Move the LPI from the old redistributor to the new one */
        gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
                             &s->gicv3->cpu[new_cte.rdbase],
                             old_ite.intid);
    }

    /* Update the ICID field in the interrupt translation table entry */
    old_ite.icid = new_icid;
    return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
}
/*
 * Current implementation blocks until all
 * commands are processed
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    AddressSpace *as = &s->gicv3->dma_as;
    uint8_t cmd;
    int i;

    /* Nothing to do unless the ITS is enabled */
    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    /* Drain the command queue until the read pointer catches the writer */
    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE;
        void *hostmem;
        hwaddr buflen;
        uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);

        /* Map the command packet and copy it out, then unmap immediately */
        buflen = GITS_CMDQ_ENTRY_SIZE;
        hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
                                    &buflen, false, MEMTXATTRS_UNSPECIFIED);
        if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
            if (hostmem) {
                address_space_unmap(as, hostmem, buflen, false, 0);
            }
            /* Memory error reading the packet: stall the queue */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }
        for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
            cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
        }
        address_space_unmap(as, hostmem, buflen, false, 0);

        cmd = cmdpkt[0] & CMD_MASK;

        trace_gicv3_its_process_command(rd_offset, cmd);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, cmdpkt, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, cmdpkt, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            trace_gicv3_its_cmd_sync();
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, cmdpkt);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cmdpkt);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, cmdpkt, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, cmdpkt, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, cmdpkt, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * but the calculated lpi priority information. We only
             * need to trigger lpi priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             */
            trace_gicv3_its_cmd_inv();
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        case GITS_CMD_MOVI:
            result = process_movi(s, cmdpkt);
            break;
        case GITS_CMD_MOVALL:
            result = process_movall(s, cmdpkt);
            break;
        default:
            trace_gicv3_its_cmd_unknown(cmd);
            break;
        }
        if (result == CMD_CONTINUE) {
            /* Advance (and wrap) the read pointer */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}
/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (like base_addr, size etc) from GITS_BASER register.
 * It is called during ITS enable and also during post_load migration
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t  page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        /* Page size encoding 3 is reserved and reads as 64K */
        case 2:
        case 3:
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         * The specification makes it UNPREDICTABLE to enable the ITS without
         * marking each BASER<n> as valid. We choose to handle these as if
         * the table was zero-sized, so commands using the table will fail
         * and interrupts requested via GITS_TRANSLATER writes will be ignored.
         * This happens automatically by leaving the num_entries field at
         * zero, which will be caught by the bounds checks we have before
         * every table lookup anyway.
         */
        if (!FIELD_EX64(value, GITS_BASER, VALID)) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            td->num_entries = (((num_pages * page_sz) /
                              L1TABLE_ENTRY_SIZE) *
                              (page_sz / td->entry_sz));
        }
        /* Cap the table size at what the ID bit width can address */
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}
987 static void extract_cmdq_params(GICv3ITSState *s)
989 uint16_t num_pages = 0;
990 uint64_t value = s->cbaser;
992 num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
994 memset(&s->cq, 0 , sizeof(s->cq));
996 if (FIELD_EX64(value, GITS_CBASER, VALID)) {
997 s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
998 GITS_CMDQ_ENTRY_SIZE;
999 s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
1000 s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
/* Read handler for the GITS translation register frame: always RAZ. */
static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
                                              uint64_t *data, unsigned size,
                                              MemTxAttrs attrs)
{
    /*
     * GITS_TRANSLATER is write-only, and all other addresses
     * in the interrupt translation space frame are RES0.
     */
    *data = 0;
    return MEMTX_OK;
}
1016 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
1017 uint64_t data, unsigned size,
1018 MemTxAttrs attrs)
1020 GICv3ITSState *s = (GICv3ITSState *)opaque;
1021 bool result = true;
1023 trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
1025 switch (offset) {
1026 case GITS_TRANSLATER:
1027 if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1028 result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
1030 break;
1031 default:
1032 break;
1035 if (result) {
1036 return MEMTX_OK;
1037 } else {
1038 return MEMTX_ERROR;
/*
 * Handle a 32-bit write to the ITS control register frame.
 * Returns true if the offset was recognised, false otherwise.
 */
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            /* Enabling the ITS: latch table/queue params and drain the queue */
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            /* Writing CBASER resets the read pointer */
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        /* The Retry bit is write-ignored */
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            /* Merge the 32-bit write into the right half, keeping RO bits */
            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1148 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1149 uint64_t *data, MemTxAttrs attrs)
1151 bool result = true;
1152 int index;
1154 switch (offset) {
1155 case GITS_CTLR:
1156 *data = s->ctlr;
1157 break;
1158 case GITS_IIDR:
1159 *data = gicv3_iidr();
1160 break;
1161 case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1162 /* ID registers */
1163 *data = gicv3_idreg(offset - GITS_IDREGS);
1164 break;
1165 case GITS_TYPER:
1166 *data = extract64(s->typer, 0, 32);
1167 break;
1168 case GITS_TYPER + 4:
1169 *data = extract64(s->typer, 32, 32);
1170 break;
1171 case GITS_CBASER:
1172 *data = extract64(s->cbaser, 0, 32);
1173 break;
1174 case GITS_CBASER + 4:
1175 *data = extract64(s->cbaser, 32, 32);
1176 break;
1177 case GITS_CREADR:
1178 *data = extract64(s->creadr, 0, 32);
1179 break;
1180 case GITS_CREADR + 4:
1181 *data = extract64(s->creadr, 32, 32);
1182 break;
1183 case GITS_CWRITER:
1184 *data = extract64(s->cwriter, 0, 32);
1185 break;
1186 case GITS_CWRITER + 4:
1187 *data = extract64(s->cwriter, 32, 32);
1188 break;
1189 case GITS_BASER ... GITS_BASER + 0x3f:
1190 index = (offset - GITS_BASER) / 8;
1191 if (offset & 7) {
1192 *data = extract64(s->baser[index], 32, 32);
1193 } else {
1194 *data = extract64(s->baser[index], 0, 32);
1196 break;
1197 default:
1198 result = false;
1199 break;
1201 return result;
1204 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1205 uint64_t value, MemTxAttrs attrs)
1207 bool result = true;
1208 int index;
1210 switch (offset) {
1211 case GITS_BASER ... GITS_BASER + 0x3f:
1213 * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1214 * already enabled
1216 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1217 index = (offset - GITS_BASER) / 8;
1218 if (s->baser[index] == 0) {
1219 /* Unimplemented GITS_BASERn: RAZ/WI */
1220 break;
1222 s->baser[index] &= GITS_BASER_RO_MASK;
1223 s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1225 break;
1226 case GITS_CBASER:
1228 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1229 * already enabled
1231 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1232 s->cbaser = value;
1233 s->creadr = 0;
1235 break;
1236 case GITS_CWRITER:
1237 s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1238 if (s->cwriter != s->creadr) {
1239 process_cmdq(s);
1241 break;
1242 case GITS_CREADR:
1243 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1244 s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1245 } else {
1246 /* RO register, ignore the write */
1247 qemu_log_mask(LOG_GUEST_ERROR,
1248 "%s: invalid guest write to RO register at offset "
1249 TARGET_FMT_plx "\n", __func__, offset);
1251 break;
1252 case GITS_TYPER:
1253 /* RO registers, ignore the write */
1254 qemu_log_mask(LOG_GUEST_ERROR,
1255 "%s: invalid guest write to RO register at offset "
1256 TARGET_FMT_plx "\n", __func__, offset);
1257 break;
1258 default:
1259 result = false;
1260 break;
1262 return result;
1265 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1266 uint64_t *data, MemTxAttrs attrs)
1268 bool result = true;
1269 int index;
1271 switch (offset) {
1272 case GITS_TYPER:
1273 *data = s->typer;
1274 break;
1275 case GITS_BASER ... GITS_BASER + 0x3f:
1276 index = (offset - GITS_BASER) / 8;
1277 *data = s->baser[index];
1278 break;
1279 case GITS_CBASER:
1280 *data = s->cbaser;
1281 break;
1282 case GITS_CREADR:
1283 *data = s->creadr;
1284 break;
1285 case GITS_CWRITER:
1286 *data = s->cwriter;
1287 break;
1288 default:
1289 result = false;
1290 break;
1292 return result;
1295 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1296 unsigned size, MemTxAttrs attrs)
1298 GICv3ITSState *s = (GICv3ITSState *)opaque;
1299 bool result;
1301 switch (size) {
1302 case 4:
1303 result = its_readl(s, offset, data, attrs);
1304 break;
1305 case 8:
1306 result = its_readll(s, offset, data, attrs);
1307 break;
1308 default:
1309 result = false;
1310 break;
1313 if (!result) {
1314 qemu_log_mask(LOG_GUEST_ERROR,
1315 "%s: invalid guest read at offset " TARGET_FMT_plx
1316 " size %u\n", __func__, offset, size);
1317 trace_gicv3_its_badread(offset, size);
1319 * The spec requires that reserved registers are RAZ/WI;
1320 * so use false returns from leaf functions as a way to
1321 * trigger the guest-error logging but don't return it to
1322 * the caller, or we'll cause a spurious guest data abort.
1324 *data = 0;
1325 } else {
1326 trace_gicv3_its_read(offset, *data, size);
1328 return MEMTX_OK;
1331 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1332 unsigned size, MemTxAttrs attrs)
1334 GICv3ITSState *s = (GICv3ITSState *)opaque;
1335 bool result;
1337 switch (size) {
1338 case 4:
1339 result = its_writel(s, offset, data, attrs);
1340 break;
1341 case 8:
1342 result = its_writell(s, offset, data, attrs);
1343 break;
1344 default:
1345 result = false;
1346 break;
1349 if (!result) {
1350 qemu_log_mask(LOG_GUEST_ERROR,
1351 "%s: invalid guest write at offset " TARGET_FMT_plx
1352 " size %u\n", __func__, offset, size);
1353 trace_gicv3_its_badwrite(offset, data, size);
1355 * The spec requires that reserved registers are RAZ/WI;
1356 * so use false returns from leaf functions as a way to
1357 * trigger the guest-error logging but don't return it to
1358 * the caller, or we'll cause a spurious guest data abort.
1360 } else {
1361 trace_gicv3_its_write(offset, data, size);
1363 return MEMTX_OK;
/*
 * MMIO ops for the ITS control frame (GITS_* registers):
 * 4- or 8-byte accesses only, handled by gicv3_its_read/write.
 */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/*
 * MMIO ops for the ITS translation frame (GITS_TRANSLATER):
 * 2- or 4-byte accesses, handled by the common-code callbacks.
 */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1386 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1388 GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1389 int i;
1391 for (i = 0; i < s->gicv3->num_cpu; i++) {
1392 if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1393 error_setg(errp, "Physical LPI not supported by CPU %d", i);
1394 return;
1398 gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1400 /* set the ITS default features supported */
1401 s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1402 s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1403 ITS_ITT_ENTRY_SIZE - 1);
1404 s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1405 s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1406 s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1407 s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1410 static void gicv3_its_reset(DeviceState *dev)
1412 GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1413 GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1415 c->parent_reset(dev);
1417 /* Quiescent bit reset to 1 */
1418 s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1421 * setting GITS_BASER0.Type = 0b001 (Device)
1422 * GITS_BASER1.Type = 0b100 (Collection Table)
1423 * GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
1424 * GITS_BASER<0,1>.Page_Size = 64KB
1425 * and default translation table entry size to 16 bytes
1427 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1428 GITS_BASER_TYPE_DEVICE);
1429 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1430 GITS_BASER_PAGESIZE_64K);
1431 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1432 GITS_DTE_SIZE - 1);
1434 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1435 GITS_BASER_TYPE_COLLECTION);
1436 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1437 GITS_BASER_PAGESIZE_64K);
1438 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1439 GITS_CTE_SIZE - 1);
1442 static void gicv3_its_post_load(GICv3ITSState *s)
1444 if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1445 extract_table_params(s);
1446 extract_cmdq_params(s);
/* QOM properties: "parent-gicv3" links this ITS to the GICv3 it serves */
static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
1456 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1458 DeviceClass *dc = DEVICE_CLASS(klass);
1459 GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1460 GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1462 dc->realize = gicv3_arm_its_realize;
1463 device_class_set_props(dc, gicv3_its_props);
1464 device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1465 icc->post_load = gicv3_its_post_load;
/* QOM type registration info for the emulated (non-KVM) GICv3 ITS */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
/* Register the ITS device type with the QOM type system at startup */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)