/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
#include "smmu-internal.h"

#define PTW_RECORD_FAULT(cfg)   (((cfg)->stage == 1) ? (cfg)->record_faults : \
                                 (cfg)->s2cfg.record_faults)

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }

        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

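/*
 * Reader's sketch of the GERROR/GERRORN handshake implemented by the two
 * functions above (not spec text): a global error is "pending" while GERROR
 * and GERRORN disagree in a bit position. The device reports a new error by
 * toggling the GERROR bit (smmuv3_trigger_irq), and the guest acknowledges
 * it by writing GERRORN with that bit flipped so the registers agree again.
 * E.g. starting from GERROR == GERRORN == 0, a command queue error toggles
 * GERROR.CMDQ_ERR to 1 (pending); the guest then writes GERRORN with
 * CMDQ_ERR set to acknowledge it.
 */
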
static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);
    MemTxResult ret;
    int i;

    ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(Cmd),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }
    for (i = 0; i < ARRAY_SIZE(cmd->word); i++) {
        le32_to_cpus(&cmd->word[i]);
    }
    return ret;
}

static MemTxResult queue_write(SMMUQueue *q, Evt *evt_in)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;
    Evt evt = *evt_in;
    int i;

    for (i = 0; i < ARRAY_SIZE(evt.word); i++) {
        cpu_to_le32s(&evt.word[i]);
    }
    ret = dma_memory_write(&address_space_memory, addr, &evt, sizeof(Evt),
                           MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

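/*
 * Reader's note: SMMU queue entries live in guest memory as little-endian
 * 32-bit words, so queue_read/queue_write above convert each word between
 * guest and host byte order; on a little-endian host these swaps compile
 * to no-ops.
 */
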
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

static void smmuv3_init_regs(SMMUv3State *s)
{
    /* Based on the "stage" property, advertise the supported translation stages. */
    if (s->stage && !strcmp("2", s->stage)) {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1);
    } else {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1);
    }

    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, VMID16, 1); /* 16-bit VMID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS,   SMMU_CMDQS);

    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);

    /* 4K, 16K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
    s->aidr = 0x1;
    s->cr[0] = 0;
    s->cr0ack = 0;
    s->irq_ctrl = 0;
    s->gerror = 0;
    s->gerrorn = 0;
    s->statusr = 0;
    s->gbpa = SMMU_GBPA_RESET_VAL;
}

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret, i;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
        le32_to_cpus(&buf->word[i]);
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret, i;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
        le32_to_cpus(&buf->word[i]);
    }
    return 0;
}

/*
 * Max valid value is 39 when SMMU_IDR3.STT == 0.
 * In architectures after SMMUv3.0:
 * - If STE.S2TG selects a 4KB or 16KB granule, the minimum valid value for this
 *   field is MAX(16, 64-IAS)
 * - If STE.S2TG selects a 64KB granule, the minimum valid value for this field
 *   is (64-IAS).
 * As we only support AA64, IAS = OAS.
 */
static bool s2t0sz_valid(SMMUTransCfg *cfg)
{
    if (cfg->s2cfg.tsz > 39) {
        return false;
    }

    if (cfg->s2cfg.granule_sz == 16) {
        return (cfg->s2cfg.tsz >= 64 - oas2bits(SMMU_IDR5_OAS));
    }

    return (cfg->s2cfg.tsz >= MAX(64 - oas2bits(SMMU_IDR5_OAS), 16));
}

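/*
 * Worked example (reader's note): with the 44-bit OAS advertised in IDR5,
 * both branches above evaluate to T0SZ >= 64 - 44 = 20. The MAX(.., 16)
 * clamp for 4KB/16KB granules only makes a difference for an IAS above 48,
 * where 64 - IAS would drop below 16.
 */
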
/*
 * Return true if s2 page table config is valid.
 * This checks whether, with the configured start level, ias_bits and
 * granularity, we can have a valid page table as described in ARM ARM
 * D8.2 Translation process. The idea is to see, for the highest possible
 * number of IPA bits, how many concatenated tables we would need; if it
 * is more than 16, then this is not possible.
 */
static bool s2_pgtable_config_valid(uint8_t sl0, uint8_t t0sz, uint8_t gran)
{
    int level = get_start_level(sl0, gran);
    uint64_t ipa_bits = 64 - t0sz;
    uint64_t max_ipa = (1ULL << ipa_bits) - 1;
    int nr_concat = pgd_concat_idx(level, gran, max_ipa) + 1;

    return nr_concat <= VMSA_MAX_S2_CONCAT;
}

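/*
 * Worked example (reader's note, following the VMSA rules cited above):
 * with a 4KB granule, a walk starting at level 1 covers 9 + 9 + 9 + 12 =
 * 39 address bits per table. For t0sz = 24 (40 IPA bits) the extra bit is
 * absorbed by concatenating 2^(40-39) = 2 start-level tables, which is
 * allowed. For t0sz = 20 (44 IPA bits) the same start level would need
 * 2^5 = 32 concatenated tables, exceeding VMSA_MAX_S2_CONCAT (16), so the
 * configuration is rejected.
 */
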
static int decode_ste_s2_cfg(SMMUTransCfg *cfg, STE *ste)
{
    cfg->stage = 2;

    if (STE_S2AA64(ste) == 0x0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 AArch32 tables not supported\n");
        g_assert_not_reached();
    }

    switch (STE_S2TG(ste)) {
    case 0x0: /* 4KB */
        cfg->s2cfg.granule_sz = 12;
        break;
    case 0x1: /* 64KB */
        cfg->s2cfg.granule_sz = 16;
        break;
    case 0x2: /* 16KB */
        cfg->s2cfg.granule_sz = 14;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 bad STE S2TG: %x\n", STE_S2TG(ste));
        goto bad_ste;
    }

    cfg->s2cfg.vttb = STE_S2TTB(ste);

    cfg->s2cfg.sl0 = STE_S2SL0(ste);
    /* FEAT_TTST not supported. */
    if (cfg->s2cfg.sl0 == 0x3) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 S2SL0 = 0x3 has no meaning!\n");
        goto bad_ste;
    }

    /* For AA64, the effective S2PS size is capped to the OAS. */
    cfg->s2cfg.eff_ps = oas2bits(MIN(STE_S2PS(ste), SMMU_IDR5_OAS));
    /*
     * It is ILLEGAL for the address in S2TTB to be outside the range
     * described by the effective S2PS value.
     */
    if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 S2TTB too large 0x%" PRIx64
                      ", effective PS %d bits\n",
                      cfg->s2cfg.vttb, cfg->s2cfg.eff_ps);
        goto bad_ste;
    }

    cfg->s2cfg.tsz = STE_S2T0SZ(ste);

    if (!s2t0sz_valid(cfg)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 bad STE S2T0SZ = %d\n",
                      cfg->s2cfg.tsz);
        goto bad_ste;
    }

    if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz,
                                 cfg->s2cfg.granule_sz)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE stage 2 config not valid!\n");
        goto bad_ste;
    }

    /* Only LE supported (IDR0.TTENDIAN). */
    if (STE_S2ENDI(ste)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE_S2ENDI only supports LE!\n");
        goto bad_ste;
    }

    cfg->s2cfg.affd = STE_S2AFFD(ste);

    cfg->s2cfg.record_faults = STE_S2R(ste);
    /* Stall is not supported, so S2S must not be set. */
    if (STE_S2S(ste)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 Stall not implemented!\n");
        goto bad_ste;
    }

    return 0;

bad_ste:
    return -EINVAL;
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;
    int ret;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    /*
     * If a stage is enabled in SW while not advertised, throw bad ste
     * according to the user manual (IHI0070E) "5.2 Stream Table Entry".
     */
    if (!STAGE1_SUPPORTED(s) && STE_CFG_S1_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S1 used but not supported.\n");
        goto bad_ste;
    }
    if (!STAGE2_SUPPORTED(s) && STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S2 used but not supported.\n");
        goto bad_ste;
    }

    if (STAGE2_SUPPORTED(s)) {
        /* VMID is considered even if s2 is disabled. */
        cfg->s2cfg.vmid = STE_S2VMID(ste);
    } else {
        /* Default to -1 */
        cfg->s2cfg.vmid = -1;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        /*
         * Stage-1 OAS defaults to OAS even if not enabled as it would be used
         * in input address check for stage-2.
         */
        cfg->oas = oas2bits(SMMU_IDR5_OAS);
        ret = decode_ste_s2_cfg(cfg, ste);
        if (ret) {
            goto bad_ste;
        }
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span, i;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
                              sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }
        for (i = 0; i < ARRAY_SIZE(l1std.word); i++) {
            le32_to_cpus(&l1std.word[i]);
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}

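/*
 * Reader's sketch of the 2-level stream table indexing used above: the SID
 * is split at bit STRTAB_BASE_CFG.SPLIT (s->sid_split); the high bits index
 * an array of L1 descriptors and the low bits index the L2 array of STEs
 * the descriptor points to. E.g. with sid_split = 8, sid = 0x0142 selects
 * L1 descriptor 0x01 and STE 0x42 within that descriptor's span.
 */
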
static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
             tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        tt->had = CD_HAD(cd, i);
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
    }

    cfg->record_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    /* ASID defaults to -1 (if s1 is not supported). */
    cfg->asid = -1;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed || (cfg->stage == 2)) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

/**
 * smmuv3_get_config - Look up a cached copy of the configuration data for
 * @sdev; on a cache miss, perform a configuration structure decoding from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}

static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    SMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    /*
     * Combined attributes used for the TLB lookup; as only one stage is
     * supported, it holds the attributes of the enabled stage.
     */
    SMMUTransTableInfo tt_combined;

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        if (FIELD_EX32(s->gbpa, GBPA, ABORT)) {
            status = SMMU_TRANS_ABORT;
        } else {
            status = SMMU_TRANS_DISABLE;
        }
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    if (cfg->stage == 1) {
        /* Select stage1 translation table. */
        tt = select_tt(cfg, addr);
        if (!tt) {
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            status = SMMU_TRANS_ERROR;
            goto epilogue;
        }
        tt_combined.granule_sz = tt->granule_sz;
        tt_combined.tsz = tt->tsz;

    } else {
        /* Stage2. */
        tt_combined.granule_sz = cfg->s2cfg.granule_sz;
        tt_combined.tsz = cfg->s2cfg.tsz;
    }
    /*
     * TLB lookup looks for granule and input size for a translation stage;
     * as only one stage is supported right now, choose the right values
     * from the configuration.
     */
    page_mask = (1ULL << tt_combined.granule_sz) - 1;
    aligned_addr = addr & ~page_mask;

    cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, aligned_addr);
    if (cached_entry) {
        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            /*
             * We know that the TLB only contains either stage-1 or stage-2 as
             * nesting is not supported. So it is sufficient to check the
             * translation stage to know the TLB stage for now.
             */
            event.u.f_walk_eabt.s2 = (cfg->stage == 2);
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cached_entry = g_new0(SMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        /* All faults from the PTW have the S2 field. */
        event.u.f_walk_eabt.s2 = (ptw_info.stage == 2);
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        smmu_iotlb_insert(bs, cfg, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = cached_entry->entry.perm;
        entry.translated_addr = cached_entry->entry.translated_addr +
                                (addr & cached_entry->entry.addr_mask);
        entry.addr_mask = cached_entry->entry.addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64" (%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}

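/*
 * Reader's summary of the status -> IOMMUTLBEntry mapping in the epilogue
 * above: SUCCESS returns the cached translation; DISABLE and BYPASS return
 * an identity mapping with the requested permissions; ABORT returns a
 * zero-permission entry without recording an event; and ERROR returns a
 * zero-permission entry after recording the fault in the event queue.
 */
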
/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @vmid: virtual machine ID or negative value if we don't care
 * @iova: iova
 * @tg: translation granule (if communicated through range invalidation)
 * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid, int vmid,
                               dma_addr_t iova, uint8_t tg,
                               uint64_t num_pages)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    IOMMUTLBEvent event;
    uint8_t granule;
    SMMUv3State *s = sdev->smmu;

    if (!tg) {
        SMMUEventInfo event = {.inval_ste_allowed = true};
        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
        SMMUTransTableInfo *tt;

        if (!cfg) {
            return;
        }

        if (asid >= 0 && cfg->asid != asid) {
            return;
        }

        if (vmid >= 0 && cfg->s2cfg.vmid != vmid) {
            return;
        }

        if (STAGE1_SUPPORTED(s)) {
            tt = select_tt(cfg, iova);
            if (!tt) {
                return;
            }
            granule = tt->granule_sz;
        } else {
            granule = cfg->s2cfg.granule_sz;
        }

    } else {
        granule = tg * 2 + 10;
    }

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = iova;
    event.entry.addr_mask = num_pages * (1 << granule) - 1;
    event.entry.perm = IOMMU_NONE;

    memory_region_notify_iommu_one(n, &event);
}

/* invalidate an asid/vmid/iova range tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid,
                                      dma_addr_t iova, uint8_t tg,
                                      uint64_t num_pages)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, vmid,
                                        iova, tg, num_pages);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages);
        }
    }
}

static void smmuv3_range_inval(SMMUState *s, Cmd *cmd)
{
    dma_addr_t end, addr = CMD_ADDR(cmd);
    uint8_t type = CMD_TYPE(cmd);
    int vmid = -1;
    uint8_t scale = CMD_SCALE(cmd);
    uint8_t num = CMD_NUM(cmd);
    uint8_t ttl = CMD_TTL(cmd);
    bool leaf = CMD_LEAF(cmd);
    uint8_t tg = CMD_TG(cmd);
    uint64_t num_pages;
    uint8_t granule;
    int asid = -1;
    SMMUv3State *smmuv3 = ARM_SMMUV3(s);

    /* Only consider VMID if stage-2 is supported. */
    if (STAGE2_SUPPORTED(smmuv3)) {
        vmid = CMD_VMID(cmd);
    }

    if (type == SMMU_CMD_TLBI_NH_VA) {
        asid = CMD_ASID(cmd);
    }

    if (!tg) {
        trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1);
        smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
        return;
    }

    /* RIL in use */

    num_pages = (num + 1) * BIT_ULL(scale);
    granule = tg * 2 + 10;

    /* Split invalidations into power-of-two range invalidations */
    end = addr + (num_pages << granule) - 1;

    while (addr != end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);

        num_pages = (mask + 1) >> granule;
        trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages);
        smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
        addr += mask + 1;
    }
}

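/*
 * Worked example (reader's note) of the RIL arithmetic above: tg = 1
 * encodes a 4KB grain (granule = 1 * 2 + 10 = 12), and num = 3, scale = 1
 * invalidate (3 + 1) * 2^1 = 8 pages, i.e. a 32KB range. The while loop
 * then carves that range into naturally aligned power-of-two chunks via
 * dma_aligned_pow2_mask() so each notifier/IOTLB call covers one chunk.
 */
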
static gboolean
smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
{
    SMMUDevice *sdev = (SMMUDevice *)key;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;

    if (sid < sid_range->start || sid > sid_range->end) {
        return false;
    }
    trace_smmuv3_config_cache_inv(sid);
    return true;
}

static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t sid = CMD_SID(&cmd), mask;
            uint8_t range = CMD_STE_RANGE(&cmd);
            SMMUSIDRange sid_range;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            mask = (1ULL << (range + 1)) - 1;
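            /*
             * Reader's note: CMD_STE_RANGE holds log2(#SIDs) - 1, so e.g.
             * range = 1 yields mask = 0b11 and the command invalidates the
             * naturally aligned block of 4 StreamIDs containing @sid.
             */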
            sid_range.start = sid & ~mask;
            sid_range.end = sid_range.start + mask;

            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
                                        &sid_range);
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            QEMU_FALLTHROUGH;
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_NH_VA:
            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            smmuv3_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_S12_VMALL:
        {
            uint16_t vmid = CMD_VMID(&cmd);

            if (!STAGE2_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_vmid(bs, vmid);
            break;
        }
        case SMMU_CMD_TLBI_S2_IPA:
            if (!STAGE2_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            /*
             * As currently only either s1 or s2 is supported,
             * we can reuse the same function for s2.
             */
            smmuv3_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            if (cmd_error == SMMU_CERROR_ILL) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Illegal command type: %d\n", CMD_TYPE(&cmd));
            }
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify that commands can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_GBPA:
        /*
         * If UPDATE is not set, the write is ignored. This is the only
         * permitted behavior in SMMUv3.2 and later.
         */
        if (data & R_GBPA_UPDATE_MASK) {
            /* Ignore update bit as write is synchronous. */
            s->gbpa = data & ~R_GBPA_UPDATE_MASK;
        }
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_AIDR:
        *data = s->aidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_GBPA:
        *data = s->gbpa;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset_hold(Object *obj)
{
    SMMUv3State *s = ARM_SMMUV3(obj);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    if (c->parent_phases.hold) {
        c->parent_phases.hold(obj);
    }

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static bool smmuv3_gbpa_needed(void *opaque)
{
    SMMUv3State *s = opaque;

    /* Only migrate GBPA if it differs from the reset value. */
    return s->gbpa != SMMU_GBPA_RESET_VAL;
}

static const VMStateDescription vmstate_gbpa = {
    .name = "smmuv3/gbpa",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = smmuv3_gbpa_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gbpa, SMMUv3State),
        VMSTATE_END_OF_LIST()
    }
};

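/*
 * Reader's note: keeping GBPA in a .needed subsection means the field is
 * only put on the wire when it differs from its reset value, so migration
 * to an older QEMU that lacks the subsection keeps working whenever the
 * guest never touched GBPA.
 */
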
static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gbpa,
        NULL
    }
};

static Property smmuv3_properties[] = {
    /*
     * Stages of translation advertised.
     * "1": Stage 1
     * "2": Stage 2
     * Defaults to stage 1
     */
    DEFINE_PROP_STRING("stage", SMMUv3State, stage),
    DEFINE_PROP_END_OF_LIST()
};

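/*
 * Usage sketch (reader's note, not taken from this file): on the virt
 * board the SMMU is created with -machine virt,iommu=smmuv3; since "stage"
 * is an ordinary qdev property, something like
 * "-global arm-smmuv3.stage=2" should select stage 2, assuming the board
 * instantiates the device through qdev so that -global applies.
 */
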
static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL,
                                       &c->parent_phases);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
    device_class_set_props(dc, smmuv3_properties);
}

static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                      IOMMUNotifierFlag old,
                                      IOMMUNotifierFlag new,
                                      Error **errp)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu MAP notifier which is "
                   "not currently supported", pci_bus_num(sdev->bus),
                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
    return 0;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)