/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
#include "smmu-internal.h"
/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }

        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}
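/*
 * GERROR and GERRORN form a toggle pair: an error condition is "active"
 * while the corresponding bits of GERROR and GERRORN differ, and the guest
 * acknowledges it by toggling the GERRORN bit back. Worked example
 * (illustrative values only): if s->gerror is 0b0010 and s->gerrorn is
 * 0b0000, bit 1 is pending; a guest write of 0b0010 to GERRORN below
 * clears it, while toggling any non-pending bit is reported as a guest
 * error.
 */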
static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}
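/*
 * Both the command and event queues are circular buffers in guest memory.
 * PROD and CONS each hold an entry index plus a wrap bit just above it;
 * the queue is empty when index and wrap bit both match, and full when
 * the indexes match but the wrap bits differ (see smmuv3_q_empty() and
 * smmuv3_q_full() in smmuv3-internal.h). Q_CONS_ENTRY/Q_PROD_ENTRY below
 * turn the current index into a guest physical address inside the queue
 * buffer.
 */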
static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size,
                           MEMTXATTRS_UNSPECIFIED);
}
static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size,
                           MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}
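/*
 * Typical caller pattern for event recording (a sketch, mirroring what
 * smmuv3_translate() does further down on a translation fault):
 *
 *     SMMUEventInfo event = {.type = SMMU_EVT_F_TRANSLATION, .sid = sid};
 *     event.u.f_translation.addr = addr;
 *     event.u.f_translation.rnw = flag & 0x1;
 *     smmuv3_record_event(s, &event);
 */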
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt,  info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt,  info->u.f_uut.rnw);
        EVT_SET_PNU(&evt,  info->u.f_uut.pnu);
        EVT_SET_IND(&evt,  info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt,  info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt,  info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt,  info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}
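/*
 * Reset-time register setup. FIELD_DP32(reg, REG, FIELD, val) (from
 * hw/registerfields.h) returns @reg with the named bitfield replaced by
 * @val, so the chains below simply accumulate the advertised ID register
 * feature bits one field at a time.
 */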
static void smmuv3_init_regs(SMMUv3State *s)
{
    /*
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     *       multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS,   SMMU_CMDQS);

    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);

    /* 4K, 16K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
    s->aidr = 0x1;
    s->cr[0] = 0;
    s->cr0ack = 0;
    s->irq_ctrl = 0;
    s->gerror = 0;
    s->gerrorn = 0;
    s->statusr = 0;
    s->gbpa = SMMU_GBPA_RESET_VAL;
}
static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}
/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}
/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}
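/*
 * Stream table layout, as used by smmu_find_ste() below: with a linear
 * table the STE is simply strtab_base[sid]. With a 2-level table the SID
 * is split by STRTAB_BASE_CFG.SPLIT (s->sid_split); e.g. with SPLIT == 8,
 * SID 0x305 selects L1 descriptor 0x3, whose SPAN and L2 pointer locate a
 * second-level table, and STE 0x05 within it (valid only while
 * 0x05 <= (1 << span) - 1).
 */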
/**
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
                              sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}
static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
             tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        tt->had = CD_HAD(cd, i);
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
    }

    cfg->record_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}
/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}
/**
 * smmuv3_get_config - Look up for a cached copy of configuration data for
 * @sdev and on cache miss performs a configuration structure decoding from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}
static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}
static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    SMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        if (FIELD_EX32(s->gbpa, GBPA, ABORT)) {
            status = SMMU_TRANS_ABORT;
        } else {
            status = SMMU_TRANS_DISABLE;
        }
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    tt = select_tt(cfg, addr);
    if (!tt) {
        if (cfg->record_faults) {
            event.type = SMMU_EVT_F_TRANSLATION;
            event.u.f_translation.addr = addr;
            event.u.f_translation.rnw = flag & 0x1;
        }
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    page_mask = (1ULL << (tt->granule_sz)) - 1;
    aligned_addr = addr & ~page_mask;

    cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr);
    if (cached_entry) {
        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cached_entry = g_new0(SMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        smmu_iotlb_insert(bs, cfg, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = cached_entry->entry.perm;
        entry.translated_addr = cached_entry->entry.translated_addr +
                                    (addr & cached_entry->entry.addr_mask);
        entry.addr_mask = cached_entry->entry.addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64" (%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}
/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @iova: iova
 * @tg: translation granule (if communicated through range invalidation)
 * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid, dma_addr_t iova,
                               uint8_t tg, uint64_t num_pages)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    IOMMUTLBEvent event;
    uint8_t granule;

    if (!tg) {
        SMMUEventInfo event = {.inval_ste_allowed = true};
        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
        SMMUTransTableInfo *tt;

        if (!cfg) {
            return;
        }

        if (asid >= 0 && cfg->asid != asid) {
            return;
        }

        tt = select_tt(cfg, iova);
        if (!tt) {
            return;
        }
        granule = tt->granule_sz;
    } else {
        granule = tg * 2 + 10;
    }

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = iova;
    event.entry.addr_mask = num_pages * (1 << granule) - 1;
    event.entry.perm = IOMMU_NONE;

    memory_region_notify_iommu_one(n, &event);
}
/* invalidate an asid/iova range tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
                                      uint8_t tg, uint64_t num_pages)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova,
                                        tg, num_pages);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages);
        }
    }
}
static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
{
    dma_addr_t end, addr = CMD_ADDR(cmd);
    uint8_t type = CMD_TYPE(cmd);
    uint16_t vmid = CMD_VMID(cmd);
    uint8_t scale = CMD_SCALE(cmd);
    uint8_t num = CMD_NUM(cmd);
    uint8_t ttl = CMD_TTL(cmd);
    bool leaf = CMD_LEAF(cmd);
    uint8_t tg = CMD_TG(cmd);
    uint64_t num_pages;
    uint8_t granule;
    int asid = -1;

    if (type == SMMU_CMD_TLBI_NH_VA) {
        asid = CMD_ASID(cmd);
    }

    if (!tg) {
        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
        smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
        return;
    }

    num_pages = (num + 1) * BIT_ULL(scale);
    granule = tg * 2 + 10;

    /* Split invalidations into ^2 range invalidations */
    end = addr + (num_pages << granule) - 1;

    while (addr != end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);

        num_pages = (mask + 1) >> granule;
        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
        smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
        addr += mask + 1;
    }
}
static gboolean
smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
{
    SMMUDevice *sdev = (SMMUDevice *)key;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;

    if (sid < sid_range->start || sid > sid_range->end) {
        return false;
    }
    trace_smmuv3_config_cache_inv(sid);
    return true;
}
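/*
 * Command queue processing. A rough sketch of the guest-side flow that
 * ends up here: the guest writes a 16-byte Cmd at the current PROD index
 * of the command queue, then bumps the CMDQ_PROD register; that MMIO
 * write is handled by smmu_writel() below, which calls
 * smmuv3_cmdq_consume() to drain every entry between CONS and PROD.
 */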
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t sid = CMD_SID(&cmd), mask;
            uint8_t range = CMD_STE_RANGE(&cmd);
            SMMUSIDRange sid_range;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            mask = (1ULL << (range + 1)) - 1;
            sid_range.start = sid & ~mask;
            sid_range.end = sid_range.start + mask;

            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
                                        &sid_range);
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_NH_VA:
            smmuv3_s1_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}
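/*
 * MMIO register write handlers. Note that bits [4:0] of the CMDQ/EVENTQ
 * base registers hold LOG2SIZE; the handlers re-derive q->log2size from
 * them on every base write and clamp it to the implemented maximum
 * (SMMU_CMDQS / SMMU_EVENTQS).
 */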
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_GBPA:
        /*
         * If UPDATE is not set, the write is ignored. This is the only
         * permitted behavior in SMMUv3.2 and later.
         */
        if (data & R_GBPA_UPDATE_MASK) {
            /* Ignore update bit as write is synchronous. */
            s->gbpa = data & ~R_GBPA_UPDATE_MASK;
        }
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}
static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_AIDR:
        *data = s->aidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_GBPA:
        *data = s->gbpa;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}
static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}
static void smmu_reset_hold(Object *obj)
{
    SMMUv3State *s = ARM_SMMUV3(obj);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    if (c->parent_phases.hold) {
        c->parent_phases.hold(obj);
    }

    smmuv3_init_regs(s);
}
static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}
static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};
static bool smmuv3_gbpa_needed(void *opaque)
{
    SMMUv3State *s = opaque;

    /* Only migrate GBPA if it has different reset value. */
    return s->gbpa != SMMU_GBPA_RESET_VAL;
}
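/*
 * GBPA is migrated in a subsection: the .needed hook above only sends it
 * when it differs from the reset value, so migration streams to builds
 * that predate the field stay compatible as long as GBPA is untouched.
 */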
static const VMStateDescription vmstate_gbpa = {
    .name = "smmuv3/gbpa",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = smmuv3_gbpa_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gbpa, SMMUv3State),
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gbpa,
        NULL
    }
};
static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}
static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL,
                                       &c->parent_phases);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}
static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                      IOMMUNotifierFlag old,
                                      IOMMUNotifierFlag new,
                                      Error **errp)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu MAP notifier which is "
                   "not currently supported", pci_bus_num(sdev->bus),
                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
    return 0;
}
static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}
static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)