/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
/*
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }

        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }

    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}
static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}
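/*
 * Helpers for the circular command and event queues: queue_read() fetches
 * the entry at the current consumer index and queue_write() stores one at
 * the current producer index, both through the global address space.
 */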
static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}
static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}
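/*
 * smmuv3_record_event - translate an SMMUEventInfo into an event queue
 * record and push it onto the event queue; if the queue write fails, an
 * EVENTQ_ABT_ERR global error is raised instead.
 */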
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}
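/*
 * Reset-time register values: the IDR registers below advertise the feature
 * set emulated by this model (stage 1 only, AArch64 tables, 2-level stream
 * table), and the queue log2 sizes are seeded into the queue base registers.
 */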
static void smmuv3_init_regs(SMMUv3State *s)
{
    /*
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     *       multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.entry_size = sizeof(struct Evt);
}
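/* smmu_get_ste - fetch one stream table entry into @buf from guest memory at @addr */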
static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}
/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}
/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}
/*
 * smmu_find_ste - Return the stream table entry associated
 * to the @sid
 *
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
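/*
 * Illustrative example of the 2-level split used below: with sid_split = 8,
 * SID 0x345 selects L1 descriptor 0x3 (sid >> sid_split) and, within the L2
 * array that descriptor points to, entry 0x45 (sid & ((1 << sid_split) - 1)).
 */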
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    /* Check SID range */
    if (sid > (1 << SMMU_IDR1_SIDSIZE)) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t strtab_base, l1ptr, l2ptr;
        STEDesc l1std;

        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr,
                              (uint8_t *)&l1std, sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "invalid sid=%d (L1STD span=0)\n", sid);
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        addr = s->strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}
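/*
 * decode_cd - decode the stage 1 context descriptor fields (ASID, TBI,
 * output address size and per-TTB translation table info) into @cfg.
 * Returns < 0 and fills @event on an invalid CD.
 */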
static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}
/*
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}
/*
 * smmuv3_get_config - Look up for a cached copy of configuration data for
 * @sdev and on cache miss performs a configuration structure decoding from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}
static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}
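/*
 * smmuv3_translate - main IOMMU translate callback: looks up the cached
 * config and IOTLB entry for the access and falls back to a page table
 * walk on a miss, recording an event on failure.
 */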
static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE, .sid = sid};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    IOMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    SMMUIOTLBKey key, *new_key;

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        status = SMMU_TRANS_DISABLE;
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    tt = select_tt(cfg, addr);
    if (!tt) {
        if (event.record_trans_faults) {
            event.type = SMMU_EVT_F_TRANSLATION;
            event.u.f_translation.addr = addr;
            event.u.f_translation.rnw = flag & 0x1;
        }
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    page_mask = (1ULL << (tt->granule_sz)) - 1;
    aligned_addr = addr & ~page_mask;

    key.asid = cfg->asid;
    key.iova = aligned_addr;

    cached_entry = g_hash_table_lookup(bs->iotlb, &key);
    if (cached_entry) {
        cfg->iotlb_hits++;
        trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
                                   cfg->iotlb_hits, cfg->iotlb_misses,
                                   100 * cfg->iotlb_hits /
                                   (cfg->iotlb_hits + cfg->iotlb_misses));
        if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cfg->iotlb_misses++;
    trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask,
                                cfg->iotlb_hits, cfg->iotlb_misses,
                                100 * cfg->iotlb_hits /
                                (cfg->iotlb_hits + cfg->iotlb_misses));

    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
        smmu_iotlb_inv_all(bs);
    }

    cached_entry = g_new0(IOMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        new_key = g_new0(SMMUIOTLBKey, 1);
        new_key->asid = cfg->asid;
        new_key->iova = aligned_addr;
        g_hash_table_insert(bs->iotlb, new_key, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = flag;
        entry.translated_addr = cached_entry->translated_addr +
                                (addr & page_mask);
        entry.addr_mask = cached_entry->addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64"(%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}
/*
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid,
                               dma_addr_t iova)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUEventInfo event = {};
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg;
    IOMMUTLBEntry entry;

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s error decoding the configuration for iommu mr=%s\n",
                      __func__, mr->parent_obj.name);
        return;
    }

    if (asid >= 0 && cfg->asid != asid) {
        return;
    }

    tt = select_tt(cfg, iova);
    if (!tt) {
        return;
    }

    entry.target_as = &address_space_memory;
    entry.iova = iova;
    entry.addr_mask = (1 << tt->granule_sz) - 1;
    entry.perm = IOMMU_NONE;

    memory_region_notify_one(n, &entry);
}
/* invalidate an asid/iova tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
{
    SMMUNotifierNode *node;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        IOMMUMemoryRegion *mr = &node->sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, iova);
        }
    }
}
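/*
 * smmuv3_cmdq_consume - dequeue and execute commands from the command
 * queue until it is empty or a command error is encountered.
 */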
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t start = CMD_SID(&cmd), end, i;
            uint8_t range = CMD_STE_RANGE(&cmd);

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            end = start + (1 << (range + 1)) - 1;
            trace_smmuv3_cmdq_cfgi_ste_range(start, end);

            for (i = start; i <= end; i++) {
                IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i);
                SMMUDevice *sdev;

                if (!mr) {
                    continue;
                }
                sdev = container_of(mr, SMMUDevice, iommu);
                smmuv3_flush_config(sdev);
            }
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        {
            dma_addr_t addr = CMD_ADDR(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
            smmuv3_inv_notifiers_iova(bs, -1, addr);
            smmu_iotlb_inv_all(bs);
            break;
        }
        case SMMU_CMD_TLBI_NH_VA:
        {
            uint16_t asid = CMD_ASID(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);
            dma_addr_t addr = CMD_ADDR(&cmd);
            bool leaf = CMD_LEAF(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
            smmuv3_inv_notifiers_iova(bs, asid, addr);
            smmu_iotlb_inv_iova(bs, asid, addr);
            break;
        }
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}
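/*
 * MMIO register accessors: 64-bit accesses are handled by smmu_writell()/
 * smmu_readll() and 32-bit accesses by smmu_writel()/smmu_readl(), both
 * dispatched from the MemoryRegionOps callbacks further down.
 */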
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}
static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x1f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}
static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}
static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}
static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}
static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};
static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};
static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}
static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}
static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                       IOMMUNotifierFlag old,
                                       IOMMUNotifierFlag new)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);
    SMMUNotifierNode *node = NULL;
    SMMUNotifierNode *next_node = NULL;

    if (new & IOMMU_NOTIFIER_MAP) {
        int bus_num = pci_bus_num(sdev->bus);
        PCIDevice *pcidev = pci_find_device(sdev->bus, bus_num, sdev->devfn);

        warn_report("SMMUv3 does not support notification on MAP: "
                    "device %s will not function properly", pcidev->name);
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        node = g_malloc0(sizeof(*node));
        node->sdev = sdev;
        QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
        return;
    }

    /* update notifier node with new flags */
    QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
        if (node->sdev == sdev) {
            if (new == IOMMU_NOTIFIER_NONE) {
                trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
                QLIST_REMOVE(node, next);
                g_free(node);
            }
            return;
        }
    }
}
static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}
static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};
static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};
static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)