/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
#include "smmu-internal.h"

#define PTW_RECORD_FAULT(cfg)   (((cfg)->stage == 1) ? (cfg)->record_faults : \
                                 (cfg)->s2cfg.record_faults)

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }

        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }

    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

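/*
 * GERROR/GERRORN implement a toggle protocol: an error is active while
 * the corresponding bits in the two registers differ. The device toggles
 * a GERROR bit to report a new error, and the guest writes GERRORN to
 * make the bits equal again, acknowledging the error.
 */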
static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size,
                           MEMTXATTRS_UNSPECIFIED);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size,
                           MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

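/*
 * Both queues live in guest memory. PROD and CONS each carry an index
 * plus a wrap bit: equal values mean the queue is empty, equal indices
 * with opposite wrap bits mean it is full. The smmuv3_q_empty()/
 * smmuv3_q_full() helpers encode those rules.
 */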
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

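/*
 * If the event queue is disabled the event is silently dropped; if the
 * queue write fails (queue full or DMA error), smmuv3_record_event()
 * raises the EVENTQ_ABT_ERR global error instead, so the loss is still
 * visible to the guest.
 */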
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt,  info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt,  info->u.f_uut.rnw);
        EVT_SET_PNU(&evt,  info->u.f_uut.pnu);
        EVT_SET_IND(&evt,  info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt,  info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt,  info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt,  info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

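/*
 * Reset values advertise a fixed feature set through the read-only ID
 * registers (IDR0..IDR5): AArch64-only page table walks, little-endian
 * tables, 16-bit ASID/VMID, 2-level stream tables and 4K/16K/64K
 * granules. Whether stage 1 (S1P) or stage 2 (S2P) is exposed depends
 * on the "stage" device property.
 */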
static void smmuv3_init_regs(SMMUv3State *s)
{
    /* Based on sys property, the stages supported in smmu will be advertised.*/
    if (s->stage && !strcmp("2", s->stage)) {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1);
    } else {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1);
    }

    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, VMID16, 1); /* 16-bit VMID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS,   SMMU_CMDQS);

    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);

    /* 4K, 16K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
    s->aidr = 0x1;
    s->cr[0] = 0;
    s->cr0ack = 0;
    s->irq_ctrl = 0;
    s->gerror = 0;
    s->gerrorn = 0;
    s->statusr = 0;
    s->gbpa = SMMU_GBPA_RESET_VAL;
}

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/*
 * Max valid value is 39 when SMMU_IDR3.STT == 0.
 * In architectures after SMMUv3.0:
 * - If STE.S2TG selects a 4KB or 16KB granule, the minimum valid value for this
 *   field is MAX(16, 64-IAS)
 * - If STE.S2TG selects a 64KB granule, the minimum valid value for this field
 *   is (64-IAS).
 * As we only support AA64, IAS = OAS.
 */
static bool s2t0sz_valid(SMMUTransCfg *cfg)
{
    if (cfg->s2cfg.tsz > 39) {
        return false;
    }

    if (cfg->s2cfg.granule_sz == 16) {
        return (cfg->s2cfg.tsz >= 64 - oas2bits(SMMU_IDR5_OAS));
    }

    return (cfg->s2cfg.tsz >= MAX(64 - oas2bits(SMMU_IDR5_OAS), 16));
}

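/*
 * Worked example: with SMMU_IDR5_OAS advertising 44 bits, a 4KB or 16KB
 * granule accepts T0SZ in [MAX(16, 64 - 44), 39] = [20, 39], and a 64KB
 * granule accepts T0SZ in [64 - 44, 39] = [20, 39] as well.
 */
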
/*
 * Return true if s2 page table config is valid.
 * This checks with the configured start level, ias_bits and granularity we can
 * have a valid page table as described in ARM ARM D8.2 Translation process.
 * The idea here is to see for the highest possible number of IPA bits, how
 * many concatenated tables we would need, if it is more than 16, then this is
 * not possible.
 */
static bool s2_pgtable_config_valid(uint8_t sl0, uint8_t t0sz, uint8_t gran)
{
    int level = get_start_level(sl0, gran);
    uint64_t ipa_bits = 64 - t0sz;
    uint64_t max_ipa = (1ULL << ipa_bits) - 1;
    int nr_concat = pgd_concat_idx(level, gran, max_ipa) + 1;

    return nr_concat <= VMSA_MAX_S2_CONCAT;
}

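/*
 * Example: with a 4KB granule and a start level of 1, a single table
 * covers a 39-bit IPA space, so a 40-bit space (t0sz = 24) needs two
 * concatenated level-1 tables; anything requiring more than
 * VMSA_MAX_S2_CONCAT (16) tables is rejected.
 */
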
static int decode_ste_s2_cfg(SMMUTransCfg *cfg, STE *ste)
{
    if (STE_S2AA64(ste) == 0x0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 AArch32 tables not supported\n");
        g_assert_not_reached();
    }

    switch (STE_S2TG(ste)) {
    case 0x0: /* 4KB */
        cfg->s2cfg.granule_sz = 12;
        break;
    case 0x1: /* 64KB */
        cfg->s2cfg.granule_sz = 16;
        break;
    case 0x2: /* 16KB */
        cfg->s2cfg.granule_sz = 14;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 bad STE S2TG: %x\n", STE_S2TG(ste));
        goto bad_ste;
    }

    cfg->s2cfg.vttb = STE_S2TTB(ste);

    cfg->s2cfg.sl0 = STE_S2SL0(ste);
    /* FEAT_TTST not supported. */
    if (cfg->s2cfg.sl0 == 0x3) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 S2SL0 = 0x3 has no meaning!\n");
        goto bad_ste;
    }

    /* For AA64, The effective S2PS size is capped to the OAS. */
    cfg->s2cfg.eff_ps = oas2bits(MIN(STE_S2PS(ste), SMMU_IDR5_OAS));
    /*
     * It is ILLEGAL for the address in S2TTB to be outside the range
     * described by the effective S2PS value.
     */
    if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 S2TTB too large 0x%" PRIx64
                      ", effective PS %d bits\n",
                      cfg->s2cfg.vttb,  cfg->s2cfg.eff_ps);
        goto bad_ste;
    }

    cfg->s2cfg.tsz = STE_S2T0SZ(ste);

    if (!s2t0sz_valid(cfg)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 bad STE S2T0SZ = %d\n",
                      cfg->s2cfg.tsz);
        goto bad_ste;
    }

    if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz,
                                 cfg->s2cfg.granule_sz)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE stage 2 config not valid!\n");
        goto bad_ste;
    }

    /* Only LE supported(IDR0.TTENDIAN). */
    if (STE_S2ENDI(ste)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE_S2ENDI only supports LE!\n");
        goto bad_ste;
    }

    cfg->s2cfg.affd = STE_S2AFFD(ste);

    cfg->s2cfg.record_faults = STE_S2R(ste);
    /* As stall is not supported. */
    if (STE_S2S(ste)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 Stall not implemented!\n");
        goto bad_ste;
    }

    return 0;

bad_ste:
    return -EINVAL;
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;
    int ret;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    /*
     * If a stage is enabled in SW while not advertised, throw bad ste
     * according to user manual(IHI0070E) "5.2 Stream Table Entry".
     */
    if (!STAGE1_SUPPORTED(s) && STE_CFG_S1_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S1 used but not supported.\n");
        goto bad_ste;
    }
    if (!STAGE2_SUPPORTED(s) && STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S2 used but not supported.\n");
        goto bad_ste;
    }

    if (STAGE2_SUPPORTED(s)) {
        /* VMID is considered even if s2 is disabled. */
        cfg->s2cfg.vmid = STE_S2VMID(ste);
    } else {
        /* Default to -1 */
        cfg->s2cfg.vmid = -1;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        /*
         * Stage-1 OAS defaults to OAS even if not enabled as it would be used
         * in input address check for stage-2.
         */
        cfg->oas = oas2bits(SMMU_IDR5_OAS);
        ret = decode_ste_s2_cfg(cfg, ste);
        if (ret) {
            goto bad_ste;
        }
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

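/*
 * Stream table layout, by example: with sid_split = 8 the L1 descriptor
 * index is sid >> 8 and the index within the L2 table is sid & 0xff;
 * each L1 descriptor advertises a span, and only L2 offsets below
 * 1 << span are valid.
 */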
/**
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
                              sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}

*cfg
, CD
*cd
, SMMUEventInfo
*event
)
639 if (!CD_VALID(cd
) || !CD_AARCH64(cd
)) {
643 goto bad_cd
; /* SMMU_IDR0.TERM_MODEL == 1 */
646 goto bad_cd
; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
648 if (CD_HA(cd
) || CD_HD(cd
)) {
649 goto bad_cd
; /* HTTU = 0 */
652 /* we support only those at the moment */
656 cfg
->oas
= oas2bits(CD_IPS(cd
));
657 cfg
->oas
= MIN(oas2bits(SMMU_IDR5_OAS
), cfg
->oas
);
658 cfg
->tbi
= CD_TBI(cd
);
659 cfg
->asid
= CD_ASID(cd
);
661 trace_smmuv3_decode_cd(cfg
->oas
);
663 /* decode data dependent on TT */
664 for (i
= 0; i
<= 1; i
++) {
666 SMMUTransTableInfo
*tt
= &cfg
->tt
[i
];
668 cfg
->tt
[i
].disabled
= CD_EPD(cd
, i
);
669 if (cfg
->tt
[i
].disabled
) {
674 if (tsz
< 16 || tsz
> 39) {
679 tt
->granule_sz
= tg2granule(tg
, i
);
680 if ((tt
->granule_sz
!= 12 && tt
->granule_sz
!= 14 &&
681 tt
->granule_sz
!= 16) || CD_ENDI(cd
)) {
686 tt
->ttb
= CD_TTB(cd
, i
);
687 if (tt
->ttb
& ~(MAKE_64BIT_MASK(0, cfg
->oas
))) {
690 tt
->had
= CD_HAD(cd
, i
);
691 trace_smmuv3_decode_cd_tt(i
, tt
->tsz
, tt
->ttb
, tt
->granule_sz
, tt
->had
);
694 cfg
->record_faults
= CD_R(cd
);
699 event
->type
= SMMU_EVT_C_BAD_CD
;
/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    /* ASID defaults to -1 (if s1 is not supported). */
    cfg->asid = -1;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed || (cfg->stage == 2)) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

/**
 * smmuv3_get_config - Look up for a cached copy of configuration data for
 * @sdev and on cache miss performs a configuration structure decoding from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}

static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    SMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    /*
     * Combined attributes used for TLB lookup, as only one stage is supported,
     * it will hold attributes based on the enabled stage.
     */
    SMMUTransTableInfo tt_combined;

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        if (FIELD_EX32(s->gbpa, GBPA, ABORT)) {
            status = SMMU_TRANS_ABORT;
        } else {
            status = SMMU_TRANS_DISABLE;
        }
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    if (cfg->stage == 1) {
        /* Select stage1 translation table. */
        tt = select_tt(cfg, addr);
        if (!tt) {
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            status = SMMU_TRANS_ERROR;
            goto epilogue;
        }
        tt_combined.granule_sz = tt->granule_sz;
        tt_combined.tsz = tt->tsz;
    } else {
        /* Stage2. */
        tt_combined.granule_sz = cfg->s2cfg.granule_sz;
        tt_combined.tsz = cfg->s2cfg.tsz;
    }
    /*
     * TLB lookup looks for granule and input size for a translation stage,
     * as only one stage is supported right now, choose the right values
     * from the configuration.
     */
    page_mask = (1ULL << tt_combined.granule_sz) - 1;
    aligned_addr = addr & ~page_mask;

    cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, aligned_addr);
    if (cached_entry) {
        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            /*
             * We know that the TLB only contains either stage-1 or stage-2 as
             * nesting is not supported. So it is sufficient to check the
             * translation stage to know the TLB stage for now.
             */
            event.u.f_walk_eabt.s2 = (cfg->stage == 2);
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cached_entry = g_new0(SMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        /* All faults from PTW has S2 field. */
        event.u.f_walk_eabt.s2 = (ptw_info.stage == 2);
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        smmu_iotlb_insert(bs, cfg, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = cached_entry->entry.perm;
        entry.translated_addr = cached_entry->entry.translated_addr +
                                    (addr & cached_entry->entry.addr_mask);
        entry.addr_mask = cached_entry->entry.addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64" (%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}

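/*
 * Only UNMAP notifiers are supported (MAP notifiers are refused in
 * smmuv3_notify_flag_changed() below), so the helpers that follow only
 * propagate TLB invalidations to the registered listeners.
 */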
/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @vmid: virtual machine ID or negative value if we don't care
 * @iova: iova
 * @tg: translation granule (if communicated through range invalidation)
 * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid, int vmid,
                               dma_addr_t iova, uint8_t tg,
                               uint64_t num_pages)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    IOMMUTLBEvent event;
    uint8_t granule;
    SMMUv3State *s = sdev->smmu;

    if (!tg) {
        SMMUEventInfo eventinfo = {.inval_ste_allowed = true};
        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &eventinfo);
        SMMUTransTableInfo *tt;

        if (!cfg) {
            return;
        }

        if (asid >= 0 && cfg->asid != asid) {
            return;
        }

        if (vmid >= 0 && cfg->s2cfg.vmid != vmid) {
            return;
        }

        if (STAGE1_SUPPORTED(s)) {
            tt = select_tt(cfg, iova);
            if (!tt) {
                return;
            }
            granule = tt->granule_sz;
        } else {
            granule = cfg->s2cfg.granule_sz;
        }

    } else {
        granule = tg * 2 + 10;
    }

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = iova;
    event.entry.addr_mask = num_pages * (1 << granule) - 1;
    event.entry.perm = IOMMU_NONE;

    memory_region_notify_iommu_one(n, &event);
}

/* invalidate an asid/vmid/iova range tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid,
                                      dma_addr_t iova, uint8_t tg,
                                      uint64_t num_pages)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, vmid,
                                        iova, tg, num_pages);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages);
        }
    }
}

static void smmuv3_range_inval(SMMUState *s, Cmd *cmd)
{
    dma_addr_t end, addr = CMD_ADDR(cmd);
    uint8_t type = CMD_TYPE(cmd);
    int vmid = -1;
    uint8_t scale = CMD_SCALE(cmd);
    uint8_t num = CMD_NUM(cmd);
    uint8_t ttl = CMD_TTL(cmd);
    bool leaf = CMD_LEAF(cmd);
    uint8_t tg = CMD_TG(cmd);
    uint64_t num_pages;
    uint8_t granule;
    int asid = -1;
    SMMUv3State *smmuv3 = ARM_SMMUV3(s);

    /* Only consider VMID if stage-2 is supported. */
    if (STAGE2_SUPPORTED(smmuv3)) {
        vmid = CMD_VMID(cmd);
    }

    if (type == SMMU_CMD_TLBI_NH_VA) {
        asid = CMD_ASID(cmd);
    }

    if (!tg) {
        trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1);
        smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
        return;
    }

    /* RIL in use */

    num_pages = (num + 1) * BIT_ULL(scale);
    granule = tg * 2 + 10;

    /* Split invalidations into ^2 range invalidations */
    end = addr + (num_pages << granule) - 1;

    while (addr != end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);

        num_pages = (mask + 1) >> granule;
        trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages);
        smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
        addr += mask + 1;
    }
}

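/*
 * Example of the power-of-two splitting above (dma_aligned_pow2_mask()
 * yields the largest mask such that addr stays aligned and the chunk
 * does not run past end): invalidating three 4KB pages starting at
 * 0x7000 is emitted as one 4KB chunk at 0x7000, whose start is not
 * 8KB-aligned, followed by one 8KB chunk at 0x8000.
 */
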
static gboolean
smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
{
    SMMUDevice *sdev = (SMMUDevice *)key;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;

    if (sid < sid_range->start || sid > sid_range->end) {
        return false;
    }
    trace_smmuv3_config_cache_inv(sid);
    return true;
}

static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t sid = CMD_SID(&cmd), mask;
            uint8_t range = CMD_STE_RANGE(&cmd);
            SMMUSIDRange sid_range;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            mask = (1ULL << (range + 1)) - 1;
            sid_range.start = sid & ~mask;
            sid_range.end = sid_range.start + mask;

            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
                                        &sid_range);
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            QEMU_FALLTHROUGH;
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_NH_VA:
            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            smmuv3_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_S12_VMALL:
        {
            uint16_t vmid = CMD_VMID(&cmd);

            if (!STAGE2_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_vmid(bs, vmid);
            break;
        }
        case SMMU_CMD_TLBI_S2_IPA:
            if (!STAGE2_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            /*
             * As currently only either s1 or s2 are supported
             * we can reuse same function for s2.
             */
            smmuv3_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            if (cmd_error == SMMU_CERROR_ILL) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Illegal command type: %d\n", CMD_TYPE(&cmd));
            }
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

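/*
 * 64-bit registers (the *_BASE and *_IRQ_CFG0 ones) are reachable two
 * ways: a single 64-bit access lands in smmu_writell()/smmu_readll(),
 * while a pair of 32-bit accesses is reassembled in
 * smmu_writel()/smmu_readl() via the "+ 4" case labels.
 */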
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_GBPA:
        /*
         * If UPDATE is not set, the write is ignored. This is the only
         * permitted behavior in SMMUv3.2 and later.
         */
        if (data & R_GBPA_UPDATE_MASK) {
            /* Ignore update bit as write is synchronous. */
            s->gbpa = data & ~R_GBPA_UPDATE_MASK;
        }
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_AIDR:
        *data = s->aidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_GBPA:
        *data = s->gbpa;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset_hold(Object *obj)
{
    SMMUv3State *s = ARM_SMMUV3(obj);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    if (c->parent_phases.hold) {
        c->parent_phases.hold(obj);
    }

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static bool smmuv3_gbpa_needed(void *opaque)
{
    SMMUv3State *s = opaque;

    /* Only migrate GBPA if it has different reset value. */
    return s->gbpa != SMMU_GBPA_RESET_VAL;
}

static const VMStateDescription vmstate_gbpa = {
    .name = "smmuv3/gbpa",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = smmuv3_gbpa_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gbpa, SMMUv3State),
        VMSTATE_END_OF_LIST()
    }
};

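/*
 * Keeping GBPA in a .needed subsection means the field is only put on
 * the wire when it differs from its reset value, preserving migration
 * compatibility with older QEMU versions that lack it.
 */
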
static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gbpa,
        NULL
    }
};

static Property smmuv3_properties[] = {
    /*
     * Stages of translation advertised.
     * "1": Stage 1
     * "2": Stage 2
     * Defaults to stage 1
     */
    DEFINE_PROP_STRING("stage", SMMUv3State, stage),
    DEFINE_PROP_END_OF_LIST()
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL,
                                       &c->parent_phases);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
    device_class_set_props(dc, smmuv3_properties);
}

static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                      IOMMUNotifierFlag old,
                                      IOMMUNotifierFlag new,
                                      Error **errp)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu MAP notifier which is "
                   "not currently supported", pci_bus_num(sdev->bus),
                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
    return 0;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)