/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
#include "smmu-internal.h"

#define PTW_RECORD_FAULT(ptw_info, cfg) (((ptw_info).stage == SMMU_STAGE_1 && \
                                          (cfg)->record_faults) || \
                                         ((ptw_info).stage == SMMU_STAGE_2 && \
                                          (cfg)->s2cfg.record_faults))

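/*
 * For example, with ptw_info.stage == SMMU_STAGE_1 the macro reduces to
 * cfg->record_faults, which decode_cd() below derives from CD.R; with
 * SMMU_STAGE_2 it reduces to cfg->s2cfg.record_faults, derived from
 * STE.S2R in decode_ste_s2_cfg().
 */
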
/*
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }

        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }

    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

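/*
 * Worked example for the GERROR path above: with s->gerror = 0b01 and
 * s->gerrorn = 0b00, bit 0 is already pending (gerror ^ gerrorn = 0b01).
 * A call with gerror_mask = 0b11 then yields new_gerrors = 0b10: only
 * bit 1 is toggled in GERROR and the interrupt is pulsed, while the
 * already-pending bit 0 stays untouched until the guest acknowledges it
 * through GERRORN.
 */
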
static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);
    MemTxResult ret;
    int i;

    ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(Cmd),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }
    for (i = 0; i < ARRAY_SIZE(cmd->word); i++) {
        le32_to_cpus(&cmd->word[i]);
    }
    return ret;
}

static MemTxResult queue_write(SMMUQueue *q, Evt *evt_in)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;
    Evt evt = *evt_in;
    int i;

    for (i = 0; i < ARRAY_SIZE(evt.word); i++) {
        cpu_to_le32s(&evt.word[i]);
    }
    ret = dma_memory_write(&address_space_memory, addr, &evt, sizeof(Evt),
                           MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

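/*
 * Both queue helpers above address guest memory through the queue base
 * plus the current index: Q_CONS_ENTRY()/Q_PROD_ENTRY() resolve to
 * base + index * entry_size (see smmuv3-internal.h), with the index
 * taken from the low log2size bits of the PROD/CONS register; the
 * extra wrap bit those registers carry is only used for full/empty
 * detection, never for addressing.
 */
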
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

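/*
 * Note the ordering above: the event is built entirely on the stack,
 * byte-swapped and DMA'd out by smmuv3_write_eventq(). A disabled event
 * queue drops the record before it is even built, while a full queue or
 * a failed DMA write escalates to the EVENTQ_ABT_ERR global error
 * interrupt instead of delivering the event.
 */
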
static void smmuv3_init_regs(SMMUv3State *s)
{
    /* Based on the sys property, the stages supported in smmu will be advertised. */
    if (s->stage && !strcmp("2", s->stage)) {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1);
    } else if (s->stage && !strcmp("nested", s->stage)) {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1);
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1);
    } else {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1);
    }

    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, VMID16, 1); /* 16-bit VMID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
    if (FIELD_EX32(s->idr[0], IDR0, S2P)) {
        /* XNX is a stage-2-specific feature */
        s->idr[3] = FIELD_DP32(s->idr[3], IDR3, XNX, 1);
    }
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);

    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
    /* 4K, 16K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
    s->aidr = 0x1;
    s->cr[0] = 0;
    s->cr0ack = 0;
    s->irq_ctrl = 0;
    s->gerror = 0;
    s->gerrorn = 0;
    s->statusr = 0;
    s->gbpa = SMMU_GBPA_RESET_VAL;
}

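/*
 * After reset the read-only IDR registers fully describe the model:
 * e.g. with the default stage property ("1"), IDR0 advertises S1P only,
 * AArch64 tables (TTF = 2), little-endian walks, 16-bit ASIDs/VMIDs and
 * a 2-level stream table, while IDR5 caps the output address size at
 * SMMU_IDR5_OAS (44 bits) with 4K/16K/64K granules.
 */
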
static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret, i;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
        le32_to_cpus(&buf->word[i]);
    }
    return 0;
}

static SMMUTranslationStatus smmuv3_do_translate(SMMUv3State *s, hwaddr addr,
                                                 SMMUTransCfg *cfg,
                                                 SMMUEventInfo *event,
                                                 IOMMUAccessFlags flag,
                                                 SMMUTLBEntry **out_entry,
                                                 SMMUTranslationClass class);
/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, SMMUTransCfg *cfg,
                       uint32_t ssid, CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret, i;
    SMMUTranslationStatus status;
    SMMUTLBEntry *entry;

    trace_smmuv3_get_cd(addr);

    if (cfg->stage == SMMU_NESTED) {
        status = smmuv3_do_translate(s, addr, cfg, event,
                                     IOMMU_RO, &entry, SMMU_CLASS_CD);

        /* The same PTW faults are reported but with CLASS = CD. */
        if (status != SMMU_TRANS_SUCCESS) {
            return -EINVAL;
        }

        addr = CACHED_ENTRY_TO_ADDR(entry, addr);
    }

    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
        le32_to_cpus(&buf->word[i]);
    }
    return 0;
}

/*
 * Max valid value is 39 when SMMU_IDR3.STT == 0.
 * In architectures after SMMUv3.0:
 * - If STE.S2TG selects a 4KB or 16KB granule, the minimum valid value for
 *   this field is MAX(16, 64-IAS)
 * - If STE.S2TG selects a 64KB granule, the minimum valid value for this
 *   field is (64-IAS)
 * As we only support AA64, IAS = OAS.
 */
static bool s2t0sz_valid(SMMUTransCfg *cfg)
{
    if (cfg->s2cfg.tsz > 39) {
        return false;
    }

    if (cfg->s2cfg.granule_sz == 16) {
        return (cfg->s2cfg.tsz >= 64 - cfg->s2cfg.eff_ps);
    }

    return (cfg->s2cfg.tsz >= MAX(64 - cfg->s2cfg.eff_ps, 16));
}

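/*
 * Worked example: with a 4KB granule (granule_sz == 12) and an
 * effective PS of 44 bits, the minimum T0SZ is MAX(64 - 44, 16) = 20,
 * so any tsz in [20, 39] is accepted; with a 64KB granule and
 * eff_ps == 52, tsz only needs to satisfy tsz >= 64 - 52 = 12.
 */
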
/*
 * Return true if s2 page table config is valid.
 * This checks whether, with the configured start level, ias_bits and
 * granularity, we can have a valid page table as described in ARM ARM
 * D8.2 Translation process. The idea here is to see, for the highest
 * possible number of IPA bits, how many concatenated tables we would
 * need; if it is more than 16, then this is not possible.
 */
static bool s2_pgtable_config_valid(uint8_t sl0, uint8_t t0sz, uint8_t gran)
{
    int level = get_start_level(sl0, gran);
    uint64_t ipa_bits = 64 - t0sz;
    uint64_t max_ipa = (1ULL << ipa_bits) - 1;
    int nr_concat = pgd_concat_idx(level, gran, max_ipa) + 1;

    return nr_concat <= VMSA_MAX_S2_CONCAT;
}

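/*
 * For instance, t0sz == 24 gives ipa_bits == 40; if the start level
 * selected through SL0 only covers 36 IPA bits for the configured
 * granule, the 4 remaining bits must be absorbed by up to 2^4 == 16
 * concatenated tables at the start level, which is exactly the
 * VMSA_MAX_S2_CONCAT bound checked here.
 */
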
static int decode_ste_s2_cfg(SMMUv3State *s, SMMUTransCfg *cfg,
                             STE *ste)
{
    uint8_t oas = FIELD_EX32(s->idr[5], IDR5, OAS);

    if (STE_S2AA64(ste) == 0x0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 AArch32 tables not supported\n");
        g_assert_not_reached();
    }

    switch (STE_S2TG(ste)) {
    case 0x0: /* 4KB */
        cfg->s2cfg.granule_sz = 12;
        break;
    case 0x1: /* 64KB */
        cfg->s2cfg.granule_sz = 16;
        break;
    case 0x2: /* 16KB */
        cfg->s2cfg.granule_sz = 14;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 bad STE S2TG: %x\n", STE_S2TG(ste));
        goto bad_ste;
    }

    cfg->s2cfg.vttb = STE_S2TTB(ste);

    cfg->s2cfg.sl0 = STE_S2SL0(ste);
    /* FEAT_TTST not supported. */
    if (cfg->s2cfg.sl0 == 0x3) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 S2SL0 = 0x3 has no meaning!\n");
        goto bad_ste;
    }

    /* For AA64, the effective S2PS size is capped to the OAS. */
    cfg->s2cfg.eff_ps = oas2bits(MIN(STE_S2PS(ste), oas));
    /*
     * For SMMUv3.1 and later, when OAS == IAS == 52, the stage 2 input
     * range is further limited to 48 bits unless STE.S2TG indicates a
     * 64KB granule.
     */
    if (cfg->s2cfg.granule_sz != 16) {
        cfg->s2cfg.eff_ps = MIN(cfg->s2cfg.eff_ps, 48);
    }
    /*
     * It is ILLEGAL for the address in S2TTB to be outside the range
     * described by the effective S2PS value.
     */
    if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 S2TTB too large 0x%" PRIx64
                      ", effective PS %d bits\n",
                      cfg->s2cfg.vttb, cfg->s2cfg.eff_ps);
        goto bad_ste;
    }

    cfg->s2cfg.tsz = STE_S2T0SZ(ste);

    if (!s2t0sz_valid(cfg)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 bad STE S2T0SZ = %d\n",
                      cfg->s2cfg.tsz);
        goto bad_ste;
    }

    if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz,
                                 cfg->s2cfg.granule_sz)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE stage 2 config not valid!\n");
        goto bad_ste;
    }

    /* Only LE supported (IDR0.TTENDIAN). */
    if (STE_S2ENDI(ste)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE_S2ENDI only supports LE!\n");
        goto bad_ste;
    }

    cfg->s2cfg.affd = STE_S2AFFD(ste);

    cfg->s2cfg.record_faults = STE_S2R(ste);
    /* Stall is not supported. */
    if (STE_S2S(ste)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 Stall not implemented!\n");
        goto bad_ste;
    }

    return 0;

bad_ste:
    return -EINVAL;
}

static void decode_ste_config(SMMUTransCfg *cfg, uint32_t config)
{
    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return;
    }
    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return;
    }

    if (STE_CFG_S1_ENABLED(config)) {
        cfg->stage = SMMU_STAGE_1;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        cfg->stage |= SMMU_STAGE_2;
    }
}

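/*
 * Note that the stage accumulates: when STE.Config enables both stages,
 * cfg->stage becomes SMMU_STAGE_1 | SMMU_STAGE_2, i.e. the SMMU_NESTED
 * value tested by the nested-translation paths below.
 */
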
/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;
    uint8_t oas = FIELD_EX32(s->idr[5], IDR5, OAS);
    int ret;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    decode_ste_config(cfg, config);

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    /*
     * If a stage is enabled in SW while not advertised, throw bad ste
     * according to user manual (IHI0070E) "5.2 Stream Table Entry".
     */
    if (!STAGE1_SUPPORTED(s) && STE_CFG_S1_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S1 used but not supported.\n");
        goto bad_ste;
    }
    if (!STAGE2_SUPPORTED(s) && STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S2 used but not supported.\n");
        goto bad_ste;
    }

    if (STAGE2_SUPPORTED(s)) {
        /* VMID is considered even if s2 is disabled. */
        cfg->s2cfg.vmid = STE_S2VMID(ste);
    } else {
        /* Default to -1 */
        cfg->s2cfg.vmid = -1;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        /*
         * Stage-1 OAS defaults to OAS even if not enabled as it would be used
         * in input address check for stage-2.
         */
        cfg->oas = oas2bits(oas);
        ret = decode_ste_s2_cfg(s, cfg, ste);
        if (ret) {
            goto bad_ste;
        }
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/*
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span, i;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
                              sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }
        for (i = 0; i < ARRAY_SIZE(l1std.word); i++) {
            le32_to_cpus(&l1std.word[i]);
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}

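/*
 * Indexing example for the 2-level layout: with sid_split == 8, a
 * StreamID of 0x1234 selects L1 descriptor 0x12 (sid >> 8) and entry
 * 0x34 (sid & 0xff) within the L2 table that descriptor points to; the
 * descriptor's SPAN field bounds how many L2 entries are valid.
 */
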
static int decode_cd(SMMUv3State *s, SMMUTransCfg *cfg,
                     CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;
    SMMUTranslationStatus status;
    SMMUTLBEntry *entry;
    uint8_t oas = FIELD_EX32(s->idr[5], IDR5, OAS);

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(oas), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);
    cfg->affd = CD_AFFD(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
             tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        /*
         * An address greater than 48 bits in size can only be output from a
         * TTD when, in SMMUv3.1 and later, the effective IPS is 52 and a 64KB
         * granule is in use for that translation table
         */
        if (tt->granule_sz != 16) {
            cfg->oas = MIN(cfg->oas, 48);
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);

        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }

        /* Translate the TTBx, from IPA to PA if nesting is enabled. */
        if (cfg->stage == SMMU_NESTED) {
            status = smmuv3_do_translate(s, tt->ttb, cfg, event, IOMMU_RO,
                                         &entry, SMMU_CLASS_TT);
            /*
             * The same PTW faults are reported but with CLASS = TT.
             * If TTBx is larger than the effective stage 1 output address
             * size, it reports C_BAD_CD, which is handled by the above case.
             */
            if (status != SMMU_TRANS_SUCCESS) {
                return -EINVAL;
            }
            tt->ttb = CACHED_ENTRY_TO_ADDR(entry, tt->ttb);
        }

        tt->had = CD_HAD(cd, i);
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
    }

    cfg->record_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/*
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    /* ASID defaults to -1 (if s1 is not supported). */
    cfg->asid = -1;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed || (cfg->stage == SMMU_STAGE_2)) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, cfg, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(s, cfg, &cd, event);
}

/*
 * smmuv3_get_config - Look up a cached copy of the configuration data for
 * @sdev; on a cache miss, perform a configuration structure decoding from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}

/* Do translation with TLB lookup. */
static SMMUTranslationStatus smmuv3_do_translate(SMMUv3State *s, hwaddr addr,
                                                 SMMUTransCfg *cfg,
                                                 SMMUEventInfo *event,
                                                 IOMMUAccessFlags flag,
                                                 SMMUTLBEntry **out_entry,
                                                 SMMUTranslationClass class)
{
    SMMUPTWEventInfo ptw_info = {};
    SMMUState *bs = ARM_SMMU(s);
    SMMUTLBEntry *cached_entry = NULL;
    int asid, stage;
    bool desc_s2_translation = class != SMMU_CLASS_IN;

    /*
     * The function uses the argument class to identify which stage is used:
     * - CLASS = IN: Means an input translation, determine the stage from STE.
     * - CLASS = CD: Means the addr is an IPA of the CD, and it would be
     *   translated using the stage-2.
     * - CLASS = TT: Means the addr is an IPA of the stage-1 translation table
     *   and it would be translated using the stage-2.
     * For the last 2 cases instead of having intrusive changes in the common
     * logic, we modify the cfg to be a stage-2 translation only in case of
     * nested, and then restore it after.
     */
    if (desc_s2_translation) {
        asid = cfg->asid;
        stage = cfg->stage;
        cfg->asid = -1;
        cfg->stage = SMMU_STAGE_2;
    }

    cached_entry = smmu_translate(bs, cfg, addr, flag, &ptw_info);

    if (desc_s2_translation) {
        cfg->asid = asid;
        cfg->stage = stage;
    }

    if (!cached_entry) {
        /* All faults from PTW have the S2 field. */
        event->u.f_walk_eabt.s2 = (ptw_info.stage == SMMU_STAGE_2);
        /*
         * Fault class is set as follows based on "class" input to
         * the function and to "ptw_info" from "smmu_translate()"
         * For stage-1:
         * - EABT => CLASS_TT (hardcoded)
         * - other events => CLASS_IN (input to function)
         * For stage-2 => CLASS_IN (input to function)
         * For nested, for all events:
         * - CD fetch => CLASS_CD (input to function)
         * - walking stage 1 translation table => CLASS_TT (from
         *   is_ipa_descriptor or input in case of TTBx)
         * - s2 translation => CLASS_IN (input to function)
         */
        class = ptw_info.is_ipa_descriptor ? SMMU_CLASS_TT : class;
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event->type = SMMU_EVT_F_WALK_EABT;
            event->u.f_walk_eabt.rnw = flag & 0x1;
            event->u.f_walk_eabt.class = (ptw_info.stage == SMMU_STAGE_2) ?
                                          class : SMMU_CLASS_TT;
            event->u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (PTW_RECORD_FAULT(ptw_info, cfg)) {
                event->type = SMMU_EVT_F_TRANSLATION;
                event->u.f_translation.addr2 = ptw_info.addr;
                event->u.f_translation.class = class;
                event->u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (PTW_RECORD_FAULT(ptw_info, cfg)) {
                event->type = SMMU_EVT_F_ADDR_SIZE;
                event->u.f_addr_size.addr2 = ptw_info.addr;
                event->u.f_addr_size.class = class;
                event->u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (PTW_RECORD_FAULT(ptw_info, cfg)) {
                event->type = SMMU_EVT_F_ACCESS;
                event->u.f_access.addr2 = ptw_info.addr;
                event->u.f_access.class = class;
                event->u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (PTW_RECORD_FAULT(ptw_info, cfg)) {
                event->type = SMMU_EVT_F_PERMISSION;
                event->u.f_permission.addr2 = ptw_info.addr;
                event->u.f_permission.class = class;
                event->u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        return SMMU_TRANS_ERROR;
    }
    *out_entry = cached_entry;
    return SMMU_TRANS_SUCCESS;
}

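/*
 * All translations funnel through the function above: smmu_get_cd() and
 * decode_cd() call it with SMMU_CLASS_CD/SMMU_CLASS_TT to resolve
 * descriptor IPAs through stage 2, while smmuv3_translate() passes
 * SMMU_CLASS_IN for the actual DMA address, so one TLB lookup path
 * serves both descriptor fetches and input translations.
 */
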
/*
 * Sets the InputAddr for an SMMU_TRANS_ERROR, as it can't be
 * set from all contexts, as smmuv3_get_config() can return
 * translation faults in case of nested translation (for CD
 * and TTBx). But in that case the iova is not known.
 */
static void smmuv3_fixup_event(SMMUEventInfo *event, hwaddr iova)
{
    switch (event->type) {
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        event->u.f_walk_eabt.addr = iova;
        break;
    default:
        break;
    }
}

/* Entry point to SMMU, does everything. */
static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUTranslationStatus status;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    SMMUTLBEntry *cached_entry = NULL;

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        if (FIELD_EX32(s->gbpa, GBPA, ABORT)) {
            status = SMMU_TRANS_ABORT;
        } else {
            status = SMMU_TRANS_DISABLE;
        }
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    status = smmuv3_do_translate(s, addr, cfg, &event, flag,
                                 &cached_entry, SMMU_CLASS_IN);

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = cached_entry->entry.perm;
        entry.translated_addr = CACHED_ENTRY_TO_ADDR(cached_entry, addr);
        entry.addr_mask = cached_entry->entry.addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm,
                                       cfg->stage);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        smmuv3_fixup_event(&event, addr);
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64" (%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}

/*
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @vmid: virtual machine ID or negative value if we don't care
 * @iova: iova
 * @tg: translation granule (if communicated through range invalidation)
 * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
 * @stage: which stage (1 or 2) is used
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid, int vmid,
                               dma_addr_t iova, uint8_t tg,
                               uint64_t num_pages, int stage)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUEventInfo eventinfo = {.inval_ste_allowed = true};
    SMMUTransCfg *cfg = smmuv3_get_config(sdev, &eventinfo);
    IOMMUTLBEvent event;
    uint8_t granule;

    if (!cfg) {
        return;
    }

    /*
     * stage is passed from TLB invalidation commands which can be either
     * stage-1 or stage-2.
     * However, IOMMUTLBEvent only understands IOVA; for stage-1 or stage-2
     * SMMU instances we consider the input address as the IOVA, but when
     * nesting is used, we can't mix stage-1 and stage-2 addresses, so for
     * nesting only stage-1 is considered the IOVA and would be notified.
     */
    if ((stage == SMMU_STAGE_2) && (cfg->stage == SMMU_NESTED)) {
        return;
    }

    if (!tg) {
        SMMUTransTableInfo *tt;

        if (asid >= 0 && cfg->asid != asid) {
            return;
        }

        if (vmid >= 0 && cfg->s2cfg.vmid != vmid) {
            return;
        }

        if (stage == SMMU_STAGE_1) {
            tt = select_tt(cfg, iova);
            if (!tt) {
                return;
            }
            granule = tt->granule_sz;
        } else {
            granule = cfg->s2cfg.granule_sz;
        }
    } else {
        granule = tg * 2 + 10;
    }

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = iova;
    event.entry.addr_mask = num_pages * (1 << granule) - 1;
    event.entry.perm = IOMMU_NONE;

    memory_region_notify_iommu_one(n, &event);
}

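/*
 * addr_mask example: a range invalidation with tg == 1 encodes a 4KB
 * grain (granule = 1 * 2 + 10 = 12), so num_pages == 4 yields
 * addr_mask = 4 * (1 << 12) - 1 = 0x3fff, i.e. a naturally aligned
 * 16KB unmap notification.
 */
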
/* invalidate an asid/vmid/iova range tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid,
                                      dma_addr_t iova, uint8_t tg,
                                      uint64_t num_pages, int stage)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, vmid,
                                        iova, tg, num_pages, stage);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages, stage);
        }
    }
}

static void smmuv3_range_inval(SMMUState *s, Cmd *cmd, SMMUStage stage)
{
    dma_addr_t end, addr = CMD_ADDR(cmd);
    uint8_t type = CMD_TYPE(cmd);
    int vmid = -1;
    uint8_t scale = CMD_SCALE(cmd);
    uint8_t num = CMD_NUM(cmd);
    uint8_t ttl = CMD_TTL(cmd);
    bool leaf = CMD_LEAF(cmd);
    uint8_t tg = CMD_TG(cmd);
    uint64_t num_pages;
    uint8_t granule;
    int asid = -1;
    SMMUv3State *smmuv3 = ARM_SMMUV3(s);

    /* Only consider VMID if stage-2 is supported. */
    if (STAGE2_SUPPORTED(smmuv3)) {
        vmid = CMD_VMID(cmd);
    }

    if (type == SMMU_CMD_TLBI_NH_VA) {
        asid = CMD_ASID(cmd);
    }

    if (!tg) {
        trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf, stage);
        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1, stage);
        if (stage == SMMU_STAGE_1) {
            smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
        } else {
            smmu_iotlb_inv_ipa(s, vmid, addr, tg, 1, ttl);
        }
        return;
    }

    num_pages = (num + 1) * BIT_ULL(scale);
    granule = tg * 2 + 10;

    /* Split invalidations into ^2 range invalidations */
    end = addr + (num_pages << granule) - 1;

    while (addr != end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);

        num_pages = (mask + 1) >> granule;
        trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages,
                                 ttl, leaf, stage);
        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages, stage);
        if (stage == SMMU_STAGE_1) {
            smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
        } else {
            smmu_iotlb_inv_ipa(s, vmid, addr, tg, num_pages, ttl);
        }
        addr += mask + 1;
    }
}

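/*
 * Splitting example: scale == 0 and num == 2 give num_pages == 3, so
 * with a 4KB grain the range covers 0x3000 bytes. Starting at
 * addr == 0x1000, dma_aligned_pow2_mask() first yields a 0x1000 chunk
 * (addr is only 4KB aligned) and then a 0x2000 chunk, each forwarded
 * as its own power-of-two invalidation.
 */
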
static gboolean
smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
{
    SMMUDevice *sdev = (SMMUDevice *)key;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;

    if (sid < sid_range->start || sid > sid_range->end) {
        return false;
    }
    trace_smmuv3_config_cache_inv(sid);
    return true;
}

static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, the spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            SMMUDevice *sdev = smmu_find_sdev(bs, sid);

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!sdev) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t sid = CMD_SID(&cmd), mask;
            uint8_t range = CMD_STE_RANGE(&cmd);
            SMMUSIDRange sid_range;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            mask = (1ULL << (range + 1)) - 1;
            sid_range.start = sid & ~mask;
            sid_range.end = sid_range.start + mask;

            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
                                        &sid_range);
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            SMMUDevice *sdev = smmu_find_sdev(bs, sid);

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!sdev) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            int asid = CMD_ASID(&cmd);
            int vmid = -1;

            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            /*
             * VMID is only matched when stage 2 is supported, otherwise set it
             * to -1 as the value used for stage-1 only VMIDs.
             */
            if (STAGE2_SUPPORTED(s)) {
                vmid = CMD_VMID(&cmd);
            }

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid_vmid(bs, asid, vmid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        {
            int vmid = -1;

            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            /*
             * If stage-2 is supported, invalidate for this VMID only,
             * otherwise invalidate the whole thing.
             */
            if (STAGE2_SUPPORTED(s)) {
                vmid = CMD_VMID(&cmd);
                trace_smmuv3_cmdq_tlbi_nh(vmid);
                smmu_iotlb_inv_vmid_s1(bs, vmid);
                break;
            }
            QEMU_FALLTHROUGH;
        }
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nsnh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_NH_VA:
            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
            break;
        case SMMU_CMD_TLBI_S12_VMALL:
        {
            int vmid = CMD_VMID(&cmd);

            if (!STAGE2_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_vmid(bs, vmid);
            break;
        }
        case SMMU_CMD_TLBI_S2_IPA:
            if (!STAGE2_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            /*
             * As currently only either s1 or s2 are supported
             * we can reuse the same function for s2.
             */
            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2);
            break;
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            if (cmd_error == SMMU_CERROR_ILL) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Illegal command type: %d\n", CMD_TYPE(&cmd));
            }
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));
    return 0;
}

static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_GBPA:
        /*
         * If UPDATE is not set, the write is ignored. This is the only
         * permitted behavior in SMMUv3.2 and later.
         */
        if (data & R_GBPA_UPDATE_MASK) {
            /* Ignore update bit as write is synchronous. */
            s->gbpa = data & ~R_GBPA_UPDATE_MASK;
        }
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_AIDR:
        *data = s->aidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_GBPA:
        *data = s->gbpa;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset_hold(Object *obj, ResetType type)
{
    SMMUv3State *s = ARM_SMMUV3(obj);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    if (c->parent_phases.hold) {
        c->parent_phases.hold(obj, type);
    }

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static bool smmuv3_gbpa_needed(void *opaque)
{
    SMMUv3State *s = opaque;

    /* Only migrate GBPA if it has a different reset value. */
    return s->gbpa != SMMU_GBPA_RESET_VAL;
}

static const VMStateDescription vmstate_gbpa = {
    .name = "smmuv3/gbpa",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = smmuv3_gbpa_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(gbpa, SMMUv3State),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_gbpa,
        NULL
    }
};

static Property smmuv3_properties[] = {
    /*
     * Stages of translation advertised.
     * "1": Stage 1
     * "2": Stage 2
     * Defaults to stage 1
     */
    DEFINE_PROP_STRING("stage", SMMUv3State, stage),
    DEFINE_PROP_END_OF_LIST()
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL,
                                       &c->parent_phases);
    device_class_set_parent_realize(dc, smmu_realize,
                                    &c->parent_realize);
    device_class_set_props(dc, smmuv3_properties);
}

static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                      IOMMUNotifierFlag old,
                                      IOMMUNotifierFlag new,
                                      Error **errp)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu MAP notifier which is "
                   "not currently supported", pci_bus_num(sdev->bus),
                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
    return 0;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)