/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 *   (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"

/*#define DEBUG_INTEL_IOMMU*/
#ifdef DEBUG_INTEL_IOMMU
enum {
    DEBUG_GENERAL, DEBUG_CSR, DEBUG_INV, DEBUG_MMU, DEBUG_FLOG,
    DEBUG_CACHE,
};
#define VTD_DBGBIT(x)   (1 << DEBUG_##x)
static int vtd_dbgflags = VTD_DBGBIT(GENERAL) | VTD_DBGBIT(CSR);

#define VTD_DPRINTF(what, fmt, ...) do { \
    if (vtd_dbgflags & VTD_DBGBIT(what)) { \
        fprintf(stderr, "(vtd)%s: " fmt "\n", __func__, \
                ## __VA_ARGS__); } \
    } while (0)
#else
#define VTD_DPRINTF(what, fmt, ...) do {} while (0)
#endif
static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
                            uint64_t wmask, uint64_t w1cmask)
{
    stq_le_p(&s->csr[addr], val);
    stq_le_p(&s->wmask[addr], wmask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
{
    stq_le_p(&s->womask[addr], mask);
}

static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
                            uint32_t wmask, uint32_t w1cmask)
{
    stl_le_p(&s->csr[addr], val);
    stl_le_p(&s->wmask[addr], wmask);
    stl_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
{
    stl_le_p(&s->womask[addr], mask);
}
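/*
 * Note on the register model used above: the MMIO register file is backed by
 * parallel byte arrays indexed by register offset. csr[] holds the current
 * value, wmask[] marks the bits software may write, w1cmask[] marks the
 * write-1-to-clear bits, and womask[] marks write-only bits that must read
 * back as zero. The vtd_define_*() helpers only seed these arrays; the masks
 * are applied by the vtd_set_*()/vtd_get_*() accessors below.
 */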
70 /* "External" get/set operations */
71 static void vtd_set_quad(IntelIOMMUState
*s
, hwaddr addr
, uint64_t val
)
73 uint64_t oldval
= ldq_le_p(&s
->csr
[addr
]);
74 uint64_t wmask
= ldq_le_p(&s
->wmask
[addr
]);
75 uint64_t w1cmask
= ldq_le_p(&s
->w1cmask
[addr
]);
76 stq_le_p(&s
->csr
[addr
],
77 ((oldval
& ~wmask
) | (val
& wmask
)) & ~(w1cmask
& val
));
80 static void vtd_set_long(IntelIOMMUState
*s
, hwaddr addr
, uint32_t val
)
82 uint32_t oldval
= ldl_le_p(&s
->csr
[addr
]);
83 uint32_t wmask
= ldl_le_p(&s
->wmask
[addr
]);
84 uint32_t w1cmask
= ldl_le_p(&s
->w1cmask
[addr
]);
85 stl_le_p(&s
->csr
[addr
],
86 ((oldval
& ~wmask
) | (val
& wmask
)) & ~(w1cmask
& val
));
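/*
 * Worked example of the masking above (illustrative values only): take a
 * 32-bit register with wmask 0x0000ffff and w1cmask 0x00000001 that currently
 * holds 0x00010003. A guest write of 0x00000005 keeps the read-only high half
 * (0x0001....), lets the writable low half through (....0005), and then clears
 * bit 0 because a 1 was written to a write-1-to-clear bit; the stored result
 * is 0x00010004.
 */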
static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
{
    uint64_t val = ldq_le_p(&s->csr[addr]);
    uint64_t womask = ldq_le_p(&s->womask[addr]);
    return val & ~womask;
}

static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
{
    uint32_t val = ldl_le_p(&s->csr[addr]);
    uint32_t womask = ldl_le_p(&s->womask[addr]);
    return val & ~womask;
}

/* "Internal" get/set operations */
static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldq_le_p(&s->csr[addr]);
}

static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldl_le_p(&s->csr[addr]);
}

static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->csr[addr], val);
}

static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
                                        uint32_t clear, uint32_t mask)
{
    uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
    stl_le_p(&s->csr[addr], new_val);
    return new_val;
}

static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
                                        uint64_t clear, uint64_t mask)
{
    uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
    stq_le_p(&s->csr[addr], new_val);
    return new_val;
}
/* GHashTable functions */
static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint vtd_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
                                          gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    uint16_t domain_id = *(uint16_t *)user_data;
    return entry->domain_id == domain_id;
}

static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
                                        gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
    uint64_t gfn = info->gfn & info->mask;
    return (entry->domain_id == info->domain_id) &&
           ((entry->gfn & info->mask) == gfn);
}
/* Reset all the gen of VTDAddressSpace to zero and set the gen of
 * IntelIOMMUState to 1.
 */
static void vtd_reset_context_cache(IntelIOMMUState *s)
{
    VTDAddressSpace **pvtd_as;
    VTDAddressSpace *vtd_as;
    uint32_t bus_it;
    uint32_t devfn_it;

    VTD_DPRINTF(CACHE, "global context_cache_gen=1");
    for (bus_it = 0; bus_it < VTD_PCI_BUS_MAX; ++bus_it) {
        pvtd_as = s->address_spaces[bus_it];
        if (!pvtd_as) {
            continue;
        }
        for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = pvtd_as[devfn_it];
            if (!vtd_as) {
                continue;
            }
            vtd_as->context_cache_entry.context_cache_gen = 0;
        }
    }
    s->context_cache_gen = 1;
}

static void vtd_reset_iotlb(IntelIOMMUState *s)
{
    assert(s->iotlb);
    g_hash_table_remove_all(s->iotlb);
}
static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                       hwaddr addr)
{
    uint64_t key;

    key = (addr >> VTD_PAGE_SHIFT_4K) |
          ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT);
    return g_hash_table_lookup(s->iotlb, &key);
}

static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                             uint16_t domain_id, hwaddr addr, uint64_t slpte,
                             bool read_flags, bool write_flags)
{
    VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
    uint64_t *key = g_malloc(sizeof(*key));
    uint64_t gfn = addr >> VTD_PAGE_SHIFT_4K;

    VTD_DPRINTF(CACHE, "update iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
                " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr, slpte,
                domain_id);
    if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
        VTD_DPRINTF(CACHE, "iotlb exceeds size limit, forced to reset");
        vtd_reset_iotlb(s);
    }

    entry->gfn = gfn;
    entry->domain_id = domain_id;
    entry->slpte = slpte;
    entry->read_flags = read_flags;
    entry->write_flags = write_flags;
    *key = gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT);
    g_hash_table_replace(s->iotlb, key, entry);
}
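/*
 * The IOTLB is keyed by a single 64-bit value that combines the guest frame
 * number with the requester: key = (addr >> VTD_PAGE_SHIFT_4K) |
 * ((uint64_t)source_id << VTD_IOTLB_SID_SHIFT). vtd_lookup_iotlb() and
 * vtd_update_iotlb() must build the key the same way, so a cached translation
 * is only visible to the source-id that created it.
 */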
/* Given the reg addr of both the message data and address, generate an
 * interrupt via MSI.
 */
static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
                                   hwaddr mesg_data_reg)
{
    hwaddr addr;
    uint32_t data;

    assert(mesg_data_reg < DMAR_REG_SIZE);
    assert(mesg_addr_reg < DMAR_REG_SIZE);

    addr = vtd_get_long_raw(s, mesg_addr_reg);
    data = vtd_get_long_raw(s, mesg_data_reg);

    VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32, addr, data);
    stl_le_phys(&address_space_memory, addr, data);
}

/* Generate a fault event to software via MSI if conditions are met.
 * Notice that the value of FSTS_REG being passed to it should be the one
 * before any update.
 */
static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
{
    if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
        pre_fsts & VTD_FSTS_IQE) {
        VTD_DPRINTF(FLOG, "there are previous interrupt conditions "
                    "to be serviced by software, fault event is not generated "
                    "(FSTS_REG 0x%"PRIx32 ")", pre_fsts);
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
    if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
        VTD_DPRINTF(FLOG, "Interrupt Mask set, fault event is not generated");
    } else {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}
/* Check if the Fault (F) field of the Fault Recording Register referenced by
 * @index is Set.
 */
static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
}

/* Update the PPF field of Fault Status Register.
 * Should be called whenever change the F field of any fault recording
 * register.
 */
static void vtd_update_fsts_ppf(IntelIOMMUState *s)
{
    uint32_t i;
    uint32_t ppf_mask = 0;

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        if (vtd_is_frcd_set(s, i)) {
            ppf_mask = VTD_FSTS_PPF;
            break;
        }
    }
    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
    VTD_DPRINTF(FLOG, "set PPF of FSTS_REG to %d", ppf_mask ? 1 : 0);
}

static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
    vtd_update_fsts_ppf(s);
}

/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
                            uint16_t source_id, hwaddr addr,
                            VTDFaultReason fault, bool is_write)
{
    uint64_t hi, lo;
    hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);

    assert(index < DMAR_FRCD_REG_NR);

    lo = VTD_FRCD_FI(addr);
    hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
    if (!is_write) {
        hi |= VTD_FRCD_T;
    }
    vtd_set_quad_raw(s, frcd_reg_addr, lo);
    vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);
    VTD_DPRINTF(FLOG, "record to FRCD_REG #%"PRIu16 ": hi 0x%"PRIx64
                ", lo 0x%"PRIx64, index, hi, lo);
}

/* Try to collapse multiple pending faults from the same requester */
static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
{
    uint32_t i;
    uint64_t frcd_reg;
    hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        frcd_reg = vtd_get_quad_raw(s, addr);
        VTD_DPRINTF(FLOG, "frcd_reg #%d 0x%"PRIx64, i, frcd_reg);
        if ((frcd_reg & VTD_FRCD_F) &&
            ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
            return true;
        }
        addr += 16; /* 128-bit for each */
    }
    return false;
}
/* Log and report a DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
                                  hwaddr addr, VTDFaultReason fault,
                                  bool is_write)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    assert(fault < VTD_FR_MAX);

    if (fault == VTD_FR_RESERVED_ERR) {
        /* This is not a normal fault reason case. Drop it. */
        return;
    }
    VTD_DPRINTF(FLOG, "sid 0x%"PRIx16 ", fault %d, addr 0x%"PRIx64
                ", is_write %d", source_id, fault, addr, is_write);
    if (fsts_reg & VTD_FSTS_PFO) {
        VTD_DPRINTF(FLOG, "new fault is not recorded due to "
                    "Primary Fault Overflow");
        return;
    }
    if (vtd_try_collapse_fault(s, source_id)) {
        VTD_DPRINTF(FLOG, "new fault is not recorded due to "
                    "compression of faults");
        return;
    }
    if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
        VTD_DPRINTF(FLOG, "Primary Fault Overflow and "
                    "new fault is not recorded, set PFO field");
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
        return;
    }

    vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);

    if (fsts_reg & VTD_FSTS_PPF) {
        VTD_DPRINTF(FLOG, "there are pending faults already, "
                    "fault event is not generated");
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
    } else {
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
                                VTD_FSTS_FRI(s->next_frcd_reg));
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
        /* This case actually causes the PPF to be Set.
         * So generate fault event (interrupt).
         */
        vtd_generate_fault_event(s, fsts_reg);
    }
}
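/*
 * Fault reporting above follows the checks required before recording: bail
 * out on Primary Fault Overflow, then skip faults that a recorded fault from
 * the same source-id already covers (collapsing), then set PFO if the next
 * fault recording register is still in use. Only when a fault is actually
 * recorded and PPF transitions from 0 to 1 is the MSI-style fault event
 * raised through vtd_generate_fault_event().
 */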
/* Handle Invalidation Queue Errors of queued invalidation interface error
 * conditions.
 */
static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
    vtd_generate_fault_event(s, fsts_reg);
}

/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
    VTD_DPRINTF(INV, "completes an invalidation wait command with "
                "Interrupt Flag");
    if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
        VTD_DPRINTF(INV, "there is a previous interrupt condition to be "
                    "serviced by software, "
                    "new invalidation event is not generated");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
    vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
    if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
        VTD_DPRINTF(INV, "IM field in IECTL_REG is set, new invalidation "
                    "event is not generated");
    } else {
        /* Generate the interrupt event */
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

static inline bool vtd_root_entry_present(VTDRootEntry *root)
{
    return root->val & VTD_ROOT_ENTRY_P;
}

static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
                              VTDRootEntry *re)
{
    dma_addr_t addr;

    addr = s->root + index * sizeof(*re);
    if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
        VTD_DPRINTF(GENERAL, "error: fail to access root-entry at 0x%"PRIx64
                    " + %"PRIu8, s->root, index);
        re->val = 0;
        return -VTD_FR_ROOT_TABLE_INV;
    }
    re->val = le64_to_cpu(re->val);
    return 0;
}
static inline bool vtd_context_entry_present(VTDContextEntry *context)
{
    return context->lo & VTD_CONTEXT_ENTRY_P;
}

static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
                                           VTDContextEntry *ce)
{
    dma_addr_t addr;

    if (!vtd_root_entry_present(root)) {
        VTD_DPRINTF(GENERAL, "error: root-entry is not present");
        return -VTD_FR_ROOT_ENTRY_P;
    }
    addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
    if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
        VTD_DPRINTF(GENERAL, "error: fail to access context-entry at 0x%"PRIx64
                    " + %"PRIu8,
                    (uint64_t)(root->val & VTD_ROOT_ENTRY_CTP), index);
        return -VTD_FR_CONTEXT_TABLE_INV;
    }
    ce->lo = le64_to_cpu(ce->lo);
    ce->hi = le64_to_cpu(ce->hi);
    return 0;
}

static inline dma_addr_t vtd_get_slpt_base_from_context(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}

/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
    return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}

static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
{
    return slpte & VTD_SL_PT_BASE_ADDR_MASK;
}

/* Whether the pte indicates the address of the page frame */
static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
{
    return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
}

/* Get the content of a spte located in @base_addr[@index] */
static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
{
    uint64_t slpte;

    assert(index < VTD_SL_PT_ENTRY_NR);

    if (dma_memory_read(&address_space_memory,
                        base_addr + index * sizeof(slpte), &slpte,
                        sizeof(slpte))) {
        slpte = (uint64_t)-1;
        return slpte;
    }
    slpte = le64_to_cpu(slpte);
    return slpte;
}
/* Given a gpa and the level of paging structure, return the offset of current
 * level.
 */
static inline uint32_t vtd_gpa_level_offset(uint64_t gpa, uint32_t level)
{
    return (gpa >> vtd_slpt_level_shift(level)) &
           ((1ULL << VTD_SL_LEVEL_BITS) - 1);
}

/* Check Capability Register to see if the @level of page-table is supported */
static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
{
    return VTD_CAP_SAGAW_MASK & s->cap &
           (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
}

/* Get the page-table level that hardware should use for the second-level
 * page-table walk from the Address Width field of context-entry.
 */
static inline uint32_t vtd_get_level_from_context_entry(VTDContextEntry *ce)
{
    return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
}

static inline uint32_t vtd_get_agaw_from_context_entry(VTDContextEntry *ce)
{
    return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
}

static const uint64_t vtd_paging_entry_rsvd_field[] = {
    [0] = ~0ULL,
    /* For not large page */
    [1] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [2] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [3] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [4] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    /* For large page */
    [5] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [6] = 0x1ff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [7] = 0x3ffff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [8] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
};

static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
{
    if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
        /* Maybe large page */
        return slpte & vtd_paging_entry_rsvd_field[level + 4];
    } else {
        return slpte & vtd_paging_entry_rsvd_field[level];
    }
}
/* Given the @gpa, get relevant @slptep. @slpte_level will be the last level
 * of the translation, can be used for deciding the size of large page.
 */
static int vtd_gpa_to_slpte(VTDContextEntry *ce, uint64_t gpa, bool is_write,
                            uint64_t *slptep, uint32_t *slpte_level,
                            bool *reads, bool *writes)
{
    dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
    uint32_t level = vtd_get_level_from_context_entry(ce);
    uint32_t offset;
    uint64_t slpte;
    uint32_t ce_agaw = vtd_get_agaw_from_context_entry(ce);
    uint64_t access_right_check;

    /* Check if @gpa is above 2^X-1, where X is the minimum of MGAW in CAP_REG
     * and AW in context-entry.
     */
    if (gpa & ~((1ULL << MIN(ce_agaw, VTD_MGAW)) - 1)) {
        VTD_DPRINTF(GENERAL, "error: gpa 0x%"PRIx64 " exceeds limits", gpa);
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    /* FIXME: what is the Atomics request here? */
    access_right_check = is_write ? VTD_SL_W : VTD_SL_R;

    while (true) {
        offset = vtd_gpa_level_offset(gpa, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            VTD_DPRINTF(GENERAL, "error: fail to access second-level paging "
                        "entry at level %"PRIu32 " for gpa 0x%"PRIx64,
                        level, gpa);
            if (level == vtd_get_level_from_context_entry(ce)) {
                /* Invalid programming of context-entry */
                return -VTD_FR_CONTEXT_ENTRY_INV;
            } else {
                return -VTD_FR_PAGING_ENTRY_INV;
            }
        }
        *reads = (*reads) && (slpte & VTD_SL_R);
        *writes = (*writes) && (slpte & VTD_SL_W);
        if (!(slpte & access_right_check)) {
            VTD_DPRINTF(GENERAL, "error: lack of %s permission for "
                        "gpa 0x%"PRIx64 " slpte 0x%"PRIx64,
                        (is_write ? "write" : "read"), gpa, slpte);
            return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
        }
        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            VTD_DPRINTF(GENERAL, "error: non-zero reserved field in second "
                        "level paging entry level %"PRIu32 " slpte 0x%"PRIx64,
                        level, slpte);
            return -VTD_FR_PAGING_ENTRY_RSVD;
        }

        if (vtd_is_last_slpte(slpte, level)) {
            *slptep = slpte;
            *slpte_level = level;
            return 0;
        }
        addr = vtd_get_slpte_addr(slpte);
        level--;
    }
}
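/*
 * Rough shape of the walk above: at each level the guest address contributes
 * VTD_SL_LEVEL_BITS (9) bits of index, shifted by 12 + (level - 1) * 9, so a
 * 3-level table covers 39 bits and a 4-level table 48 bits of guest address,
 * matching the 30 + AW * 9 adjusted guest address width. The loop stops at a
 * leaf (lowest level or page-size bit set) or on the first permission or
 * reserved-bit violation.
 */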
/* Map a device to its corresponding domain (context-entry) */
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
                                    uint8_t devfn, VTDContextEntry *ce)
{
    VTDRootEntry re;
    int ret_fr;

    ret_fr = vtd_get_root_entry(s, bus_num, &re);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_root_entry_present(&re)) {
        VTD_DPRINTF(GENERAL, "error: root-entry #%"PRIu8 " is not present",
                    bus_num);
        return -VTD_FR_ROOT_ENTRY_P;
    } else if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
        VTD_DPRINTF(GENERAL, "error: non-zero reserved field in root-entry "
                    "hi 0x%"PRIx64 " lo 0x%"PRIx64, re.rsvd, re.val);
        return -VTD_FR_ROOT_ENTRY_RSVD;
    }

    ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_context_entry_present(ce)) {
        VTD_DPRINTF(GENERAL,
                    "error: context-entry #%"PRIu8 "(bus #%"PRIu8 ") "
                    "is not present", devfn, bus_num);
        return -VTD_FR_CONTEXT_ENTRY_P;
    } else if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
               (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
        VTD_DPRINTF(GENERAL,
                    "error: non-zero reserved field in context-entry "
                    "hi 0x%"PRIx64 " lo 0x%"PRIx64, ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }
    /* Check if the programming of context-entry is valid */
    if (!vtd_is_level_supported(s, vtd_get_level_from_context_entry(ce))) {
        VTD_DPRINTF(GENERAL, "error: unsupported Address Width value in "
                    "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
                    ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    } else if (ce->lo & VTD_CONTEXT_ENTRY_TT) {
        VTD_DPRINTF(GENERAL, "error: unsupported Translation Type in "
                    "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
                    ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }
    return 0;
}
static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
{
    return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
}

static const bool vtd_qualified_faults[] = {
    [VTD_FR_RESERVED] = false,
    [VTD_FR_ROOT_ENTRY_P] = false,
    [VTD_FR_CONTEXT_ENTRY_P] = true,
    [VTD_FR_CONTEXT_ENTRY_INV] = true,
    [VTD_FR_ADDR_BEYOND_MGAW] = true,
    [VTD_FR_WRITE] = true,
    [VTD_FR_READ] = true,
    [VTD_FR_PAGING_ENTRY_INV] = true,
    [VTD_FR_ROOT_TABLE_INV] = false,
    [VTD_FR_CONTEXT_TABLE_INV] = false,
    [VTD_FR_ROOT_ENTRY_RSVD] = false,
    [VTD_FR_PAGING_ENTRY_RSVD] = true,
    [VTD_FR_CONTEXT_ENTRY_TT] = true,
    [VTD_FR_RESERVED_ERR] = false,
    [VTD_FR_MAX] = false,
};

/* To see if a fault condition is "qualified", which is reported to software
 * only if the FPD field in the context-entry used to process the faulting
 * request is 0.
 */
static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
{
    return vtd_qualified_faults[fault];
}

static inline bool vtd_is_interrupt_addr(hwaddr addr)
{
    return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
}
/* Map dev to context-entry then do a paging-structures walk to do an IOMMU
 * translation.
 *
 * @bus_num: The bus number
 * @devfn: The devfn, which is the combined device and function number
 * @is_write: The access is a write operation
 * @entry: IOMMUTLBEntry that contains the addr to be translated and the result
 */
static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, uint8_t bus_num,
                                   uint8_t devfn, hwaddr addr, bool is_write,
                                   IOMMUTLBEntry *entry)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    VTDContextEntry ce;
    VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
    uint64_t slpte;
    uint32_t level;
    uint16_t source_id = vtd_make_source_id(bus_num, devfn);
    int ret_fr;
    bool is_fpd_set = false;
    bool reads = true;
    bool writes = true;
    VTDIOTLBEntry *iotlb_entry;

    /* Check if the request is in interrupt address range */
    if (vtd_is_interrupt_addr(addr)) {
        if (is_write) {
            /* FIXME: since we don't know the length of the access here, we
             * treat Non-DWORD length write requests without PASID as
             * interrupt requests, too. Without interrupt remapping support,
             * we just use 1:1 mapping.
             */
            VTD_DPRINTF(MMU, "write request to interrupt address "
                        "gpa 0x%"PRIx64, addr);
            entry->iova = addr & VTD_PAGE_MASK_4K;
            entry->translated_addr = addr & VTD_PAGE_MASK_4K;
            entry->addr_mask = ~VTD_PAGE_MASK_4K;
            entry->perm = IOMMU_WO;
        } else {
            VTD_DPRINTF(GENERAL, "error: read request from interrupt address "
                        "gpa 0x%"PRIx64, addr);
            vtd_report_dmar_fault(s, source_id, addr, VTD_FR_READ, is_write);
        }
        return;
    }
    /* Try to fetch slpte from IOTLB */
    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
    if (iotlb_entry) {
        VTD_DPRINTF(CACHE, "hit iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
                    " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr,
                    iotlb_entry->slpte, iotlb_entry->domain_id);
        slpte = iotlb_entry->slpte;
        reads = iotlb_entry->read_flags;
        writes = iotlb_entry->write_flags;
        goto out;
    }
    /* Try to fetch context-entry from cache first */
    if (cc_entry->context_cache_gen == s->context_cache_gen) {
        VTD_DPRINTF(CACHE, "hit context-cache bus %d devfn %d "
                    "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 ")",
                    bus_num, devfn, cc_entry->context_entry.hi,
                    cc_entry->context_entry.lo, cc_entry->context_cache_gen);
        ce = cc_entry->context_entry;
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
    } else {
        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (ret_fr) {
            ret_fr = -ret_fr;
            if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
                VTD_DPRINTF(FLOG, "fault processing is disabled for DMA "
                            "requests through this context-entry "
                            "(with FPD Set)");
            } else {
                vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
            }
            return;
        }
        /* Update context-cache */
        VTD_DPRINTF(CACHE, "update context-cache bus %d devfn %d "
                    "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 "->%"PRIu32 ")",
                    bus_num, devfn, ce.hi, ce.lo,
                    cc_entry->context_cache_gen, s->context_cache_gen);
        cc_entry->context_entry = ce;
        cc_entry->context_cache_gen = s->context_cache_gen;
    }

    ret_fr = vtd_gpa_to_slpte(&ce, addr, is_write, &slpte, &level,
                              &reads, &writes);
    if (ret_fr) {
        ret_fr = -ret_fr;
        if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
            VTD_DPRINTF(FLOG, "fault processing is disabled for DMA requests "
                        "through this context-entry (with FPD Set)");
        } else {
            vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
        }
        return;
    }

    vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
                     reads, writes);
out:
    entry->iova = addr & VTD_PAGE_MASK_4K;
    entry->translated_addr = vtd_get_slpte_addr(slpte) & VTD_PAGE_MASK_4K;
    entry->addr_mask = ~VTD_PAGE_MASK_4K;
    entry->perm = (writes ? 2 : 0) + (reads ? 1 : 0);
}
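/*
 * The perm value assembled above packs the surviving read/write rights into
 * the two low bits ((writes ? 2 : 0) + (reads ? 1 : 0)), which is assumed to
 * line up with QEMU's IOMMUAccessFlags encoding of IOMMU_NONE/RO/WO/RW
 * (bit 0 = read allowed, bit 1 = write allowed).
 */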
static void vtd_root_table_setup(IntelIOMMUState *s)
{
    s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
    s->root_extended = s->root & VTD_RTADDR_RTT;
    s->root &= VTD_RTADDR_ADDR_MASK;

    VTD_DPRINTF(CSR, "root_table addr 0x%"PRIx64 " %s", s->root,
                (s->root_extended ? "(extended)" : ""));
}

static void vtd_context_global_invalidate(IntelIOMMUState *s)
{
    s->context_cache_gen++;
    if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
        vtd_reset_context_cache(s);
    }
}

/* Do a context-cache device-selective invalidation.
 * @func_mask: FM field after shifting
 */
static void vtd_context_device_invalidate(IntelIOMMUState *s,
                                          uint16_t source_id,
                                          uint16_t func_mask)
{
    uint16_t mask;
    VTDAddressSpace **pvtd_as;
    VTDAddressSpace *vtd_as;
    uint16_t devfn;
    uint16_t devfn_it;

    switch (func_mask & 3) {
    case 0:
        mask = 0;   /* No bits in the SID field masked */
        break;
    case 1:
        mask = 4;   /* Mask bit 2 in the SID field */
        break;
    case 2:
        mask = 6;   /* Mask bit 2:1 in the SID field */
        break;
    case 3:
        mask = 7;   /* Mask bit 2:0 in the SID field */
        break;
    }
    VTD_DPRINTF(INV, "device-selective invalidation source 0x%"PRIx16
                " mask %"PRIu16, source_id, mask);
    pvtd_as = s->address_spaces[VTD_SID_TO_BUS(source_id)];
    if (pvtd_as) {
        devfn = VTD_SID_TO_DEVFN(source_id);
        for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = pvtd_as[devfn_it];
            if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
                VTD_DPRINTF(INV, "invalidate context-cache of devfn 0x%"PRIx16,
                            devfn_it);
                vtd_as->context_cache_entry.context_cache_gen = 0;
            }
        }
    }
}

/* Context-cache invalidation
 * Returns the Context Actual Invalidation Granularity.
 * @val: the content of the CCMD_REG
 */
static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
{
    uint64_t caig;
    uint64_t type = val & VTD_CCMD_CIRG_MASK;

    switch (type) {
    case VTD_CCMD_DOMAIN_INVL:
        VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
                    (uint16_t)VTD_CCMD_DID(val));
        /* Fall through */
    case VTD_CCMD_GLOBAL_INVL:
        VTD_DPRINTF(INV, "global invalidation");
        caig = VTD_CCMD_GLOBAL_INVL_A;
        vtd_context_global_invalidate(s);
        break;

    case VTD_CCMD_DEVICE_INVL:
        caig = VTD_CCMD_DEVICE_INVL_A;
        vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: invalid granularity");
        caig = 0;
    }
    return caig;
}
static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
{
    vtd_reset_iotlb(s);
}

static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
{
    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
                                &domain_id);
}

static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
                                      hwaddr addr, uint8_t am)
{
    VTDIOTLBPageInvInfo info;

    assert(am <= VTD_MAMV);
    info.domain_id = domain_id;
    info.gfn = addr >> VTD_PAGE_SHIFT_4K;
    info.mask = ~((1 << am) - 1);
    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
}
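/*
 * For page-selective invalidation, am is the address-mask power of two:
 * info.mask = ~((1 << am) - 1) drops the low am bits of the gfn, so an entry
 * matches whenever it falls anywhere inside the aligned 2^am-page range named
 * by the invalidation request.
 */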
/* Flush IOTLB
 * Returns the IOTLB Actual Invalidation Granularity.
 * @val: the content of the IOTLB_REG
 */
static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
{
    uint64_t iaig;
    uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
    uint16_t domain_id;
    hwaddr addr;
    uint8_t am;

    switch (type) {
    case VTD_TLB_GLOBAL_FLUSH:
        VTD_DPRINTF(INV, "global invalidation");
        iaig = VTD_TLB_GLOBAL_FLUSH_A;
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_TLB_DSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
                    domain_id);
        iaig = VTD_TLB_DSI_FLUSH_A;
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_TLB_PSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
        am = VTD_IVA_AM(addr);
        addr = VTD_IVA_ADDR(addr);
        VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
                    " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
        if (am > VTD_MAMV) {
            VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
                        "%"PRIu8, (uint8_t)VTD_MAMV);
            iaig = 0;
            break;
        }
        iaig = VTD_TLB_PSI_FLUSH_A;
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: invalid granularity");
        iaig = 0;
    }
    return iaig;
}
*s
)
1024 return s
->iq_tail
== 0;
1027 static inline bool vtd_queued_inv_disable_check(IntelIOMMUState
*s
)
1029 return s
->qi_enabled
&& (s
->iq_tail
== s
->iq_head
) &&
1030 (s
->iq_last_desc_type
== VTD_INV_DESC_WAIT
);
1033 static void vtd_handle_gcmd_qie(IntelIOMMUState
*s
, bool en
)
1035 uint64_t iqa_val
= vtd_get_quad_raw(s
, DMAR_IQA_REG
);
1037 VTD_DPRINTF(INV
, "Queued Invalidation Enable %s", (en
? "on" : "off"));
1039 if (vtd_queued_inv_enable_check(s
)) {
1040 s
->iq
= iqa_val
& VTD_IQA_IQA_MASK
;
1041 /* 2^(x+8) entries */
1042 s
->iq_size
= 1UL << ((iqa_val
& VTD_IQA_QS
) + 8);
1043 s
->qi_enabled
= true;
1044 VTD_DPRINTF(INV
, "DMAR_IQA_REG 0x%"PRIx64
, iqa_val
);
1045 VTD_DPRINTF(INV
, "Invalidation Queue addr 0x%"PRIx64
" size %d",
1047 /* Ok - report back to driver */
1048 vtd_set_clear_mask_long(s
, DMAR_GSTS_REG
, 0, VTD_GSTS_QIES
);
1050 VTD_DPRINTF(GENERAL
, "error: can't enable Queued Invalidation: "
1051 "tail %"PRIu16
, s
->iq_tail
);
1054 if (vtd_queued_inv_disable_check(s
)) {
1055 /* disable Queued Invalidation */
1056 vtd_set_quad_raw(s
, DMAR_IQH_REG
, 0);
1058 s
->qi_enabled
= false;
1059 /* Ok - report back to driver */
1060 vtd_set_clear_mask_long(s
, DMAR_GSTS_REG
, VTD_GSTS_QIES
, 0);
1062 VTD_DPRINTF(GENERAL
, "error: can't disable Queued Invalidation: "
1063 "head %"PRIu16
", tail %"PRIu16
1064 ", last_descriptor %"PRIu8
,
1065 s
->iq_head
, s
->iq_tail
, s
->iq_last_desc_type
);
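/*
 * Queue sizing above follows the IQA_REG encoding used here: the QS field
 * selects 2^(QS + 8) descriptors, so the smallest queue is 256 entries.
 * Enabling is only accepted while the tail pointer is 0, and disabling only
 * once the queue has drained and the last processed descriptor was an
 * Invalidation Wait Descriptor, mirroring vtd_queued_inv_enable_check() and
 * vtd_queued_inv_disable_check().
 */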
/* Set Root Table Pointer */
static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
{
    VTD_DPRINTF(CSR, "set Root Table Pointer");

    vtd_root_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
}

/* Handle Translation Enable/Disable */
static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
{
    VTD_DPRINTF(CSR, "Translation Enable %s", (en ? "on" : "off"));

    if (en) {
        s->dmar_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
    } else {
        s->dmar_enabled = false;

        /* Clear the index of Fault Recording Register */
        s->next_frcd_reg = 0;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
    }
}

/* Handle write to Global Command Register */
static void vtd_handle_gcmd_write(IntelIOMMUState *s)
{
    uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
    uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
    uint32_t changed = status ^ val;

    VTD_DPRINTF(CSR, "value 0x%"PRIx32 " status 0x%"PRIx32, val, status);
    if (changed & VTD_GCMD_TE) {
        /* Translation enable/disable */
        vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
    }
    if (val & VTD_GCMD_SRTP) {
        /* Set/update the root-table pointer */
        vtd_handle_gcmd_srtp(s);
    }
    if (changed & VTD_GCMD_QIE) {
        /* Queued Invalidation Enable */
        vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
    }
}
/* Handle write to Context Command Register */
static void vtd_handle_ccmd_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);

    /* Context-cache invalidation request */
    if (val & VTD_CCMD_ICC) {
        if (s->qi_enabled) {
            VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
                        "should not use register-based invalidation");
            return;
        }
        ret = vtd_context_cache_invalidate(s, val);
        /* Invalidation completed. Change something to show */
        vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
                                      ret);
        VTD_DPRINTF(INV, "CCMD_REG write-back val: 0x%"PRIx64, ret);
    }
}

/* Handle write to IOTLB Invalidation Register */
static void vtd_handle_iotlb_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);

    /* IOTLB invalidation request */
    if (val & VTD_TLB_IVT) {
        if (s->qi_enabled) {
            VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
                        "should not use register-based invalidation");
            return;
        }
        ret = vtd_iotlb_flush(s, val);
        /* Invalidation completed. Change something to show */
        vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
                                      VTD_TLB_FLUSH_GRANU_MASK_A, ret);
        VTD_DPRINTF(INV, "IOTLB_REG write-back val: 0x%"PRIx64, ret);
    }
}
/* Fetch an Invalidation Descriptor from the Invalidation Queue */
static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
                             VTDInvDesc *inv_desc)
{
    dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
    if (dma_memory_read(&address_space_memory, addr, inv_desc,
                        sizeof(*inv_desc))) {
        VTD_DPRINTF(GENERAL, "error: fail to fetch Invalidation Descriptor "
                    "base_addr 0x%"PRIx64 " offset %"PRIu32, base_addr, offset);
        inv_desc->lo = 0;
        inv_desc->hi = 0;
        return false;
    }
    inv_desc->lo = le64_to_cpu(inv_desc->lo);
    inv_desc->hi = le64_to_cpu(inv_desc->hi);
    return true;
}

static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
        (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
        VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Invalidation "
                    "Wait Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
                    inv_desc->hi, inv_desc->lo);
        return false;
    }
    if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
        /* Status Write */
        uint32_t status_data = (uint32_t)(inv_desc->lo >>
                               VTD_INV_DESC_WAIT_DATA_SHIFT);

        assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));

        /* FIXME: need to be masked with HAW? */
        dma_addr_t status_addr = inv_desc->hi;
        VTD_DPRINTF(INV, "status data 0x%x, status addr 0x%"PRIx64,
                    status_data, status_addr);
        status_data = cpu_to_le32(status_data);
        if (dma_memory_write(&address_space_memory, status_addr, &status_data,
                             sizeof(status_data))) {
            VTD_DPRINTF(GENERAL, "error: fail to perform a coherent write");
            return false;
        }
    } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
        /* Interrupt flag */
        VTD_DPRINTF(INV, "Invalidation Wait Descriptor interrupt completion");
        vtd_generate_completion_event(s);
    } else {
        VTD_DPRINTF(GENERAL, "error: invalid Invalidation Wait Descriptor: "
                    "hi 0x%"PRIx64 " lo 0x%"PRIx64, inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}
static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
                                           VTDInvDesc *inv_desc)
{
    if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
        VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Context-cache "
                    "Invalidate Descriptor");
        return false;
    }
    switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
    case VTD_INV_DESC_CC_DOMAIN:
        VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
                    (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
        /* Fall through */
    case VTD_INV_DESC_CC_GLOBAL:
        VTD_DPRINTF(INV, "global invalidation");
        vtd_context_global_invalidate(s);
        break;

    case VTD_INV_DESC_CC_DEVICE:
        vtd_context_device_invalidate(s, VTD_INV_DESC_CC_SID(inv_desc->lo),
                                      VTD_INV_DESC_CC_FM(inv_desc->lo));
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: invalid granularity in Context-cache "
                    "Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
                    inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    uint16_t domain_id;
    uint8_t am;
    hwaddr addr;

    if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
        VTD_DPRINTF(GENERAL, "error: non-zero reserved field in IOTLB "
                    "Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
                    inv_desc->hi, inv_desc->lo);
        return false;
    }

    switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
    case VTD_INV_DESC_IOTLB_GLOBAL:
        VTD_DPRINTF(INV, "global invalidation");
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_INV_DESC_IOTLB_DOMAIN:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
                    domain_id);
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_INV_DESC_IOTLB_PAGE:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
        am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
        VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
                    " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
        if (am > VTD_MAMV) {
            VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
                        "%"PRIu8, (uint8_t)VTD_MAMV);
            return false;
        }
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: invalid granularity in IOTLB Invalidate "
                    "Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
                    inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}
static bool vtd_process_inv_desc(IntelIOMMUState *s)
{
    VTDInvDesc inv_desc;
    uint8_t desc_type;

    VTD_DPRINTF(INV, "iq head %"PRIu16, s->iq_head);
    if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
        s->iq_last_desc_type = VTD_INV_DESC_NONE;
        return false;
    }
    desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
    /* FIXME: should update at first or at last? */
    s->iq_last_desc_type = desc_type;

    switch (desc_type) {
    case VTD_INV_DESC_CC:
        VTD_DPRINTF(INV, "Context-cache Invalidate Descriptor hi 0x%"PRIx64
                    " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
        if (!vtd_process_context_cache_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IOTLB:
        VTD_DPRINTF(INV, "IOTLB Invalidate Descriptor hi 0x%"PRIx64
                    " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
        if (!vtd_process_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_WAIT:
        VTD_DPRINTF(INV, "Invalidation Wait Descriptor hi 0x%"PRIx64
                    " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
        if (!vtd_process_wait_desc(s, &inv_desc)) {
            return false;
        }
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: unknown Invalidation Descriptor type "
                    "hi 0x%"PRIx64 " lo 0x%"PRIx64 " type %"PRIu8,
                    inv_desc.hi, inv_desc.lo, desc_type);
        return false;
    }
    s->iq_head++;
    if (s->iq_head == s->iq_size) {
        s->iq_head = 0;
    }
    return true;
}
/* Try to fetch and process more Invalidation Descriptors */
static void vtd_fetch_inv_desc(IntelIOMMUState *s)
{
    VTD_DPRINTF(INV, "fetch Invalidation Descriptors");
    if (s->iq_tail >= s->iq_size) {
        /* Detects an invalid Tail pointer */
        VTD_DPRINTF(GENERAL, "error: iq_tail is %"PRIu16
                    " while iq_size is %"PRIu16, s->iq_tail, s->iq_size);
        vtd_handle_inv_queue_error(s);
        return;
    }
    while (s->iq_head != s->iq_tail) {
        if (!vtd_process_inv_desc(s)) {
            /* Invalidation Queue Errors */
            vtd_handle_inv_queue_error(s);
            break;
        }
        /* Must update the IQH_REG in time */
        vtd_set_quad_raw(s, DMAR_IQH_REG,
                         (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
                         VTD_IQH_QH_MASK);
    }
}

/* Handle write to Invalidation Queue Tail Register */
static void vtd_handle_iqt_write(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);

    s->iq_tail = VTD_IQT_QT(val);
    VTD_DPRINTF(INV, "set iq tail %"PRIu16, s->iq_tail);
    if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
        /* Process Invalidation Queue here */
        vtd_fetch_inv_desc(s);
    }
}
static void vtd_handle_fsts_write(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
    uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;

    if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        VTD_DPRINTF(FLOG, "all pending interrupt conditions serviced, clear "
                    "IP field of FECTL_REG");
    }
    /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
     * Descriptors if there are any when Queued Invalidation is enabled?
     */
}

static void vtd_handle_fectl_write(IntelIOMMUState *s)
{
    uint32_t fectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        VTD_DPRINTF(FLOG, "IM field is cleared, generate "
                    "fault event interrupt");
    }
}

static void vtd_handle_ics_write(IntelIOMMUState *s)
{
    uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
    uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
        VTD_DPRINTF(INV, "pending completion interrupt condition serviced, "
                    "clear IP field of IECTL_REG");
    }
}

static void vtd_handle_iectl_write(IntelIOMMUState *s)
{
    uint32_t iectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
    if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
        VTD_DPRINTF(INV, "IM field is cleared, generate "
                    "invalidation event interrupt");
    }
}
static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    IntelIOMMUState *s = opaque;
    uint64_t val;

    if (addr + size > DMAR_REG_SIZE) {
        VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
                    ", got 0x%"PRIx64 " %d",
                    (uint64_t)DMAR_REG_SIZE, addr, size);
        return (uint64_t)-1;
    }

    switch (addr) {
    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            val = s->root & ((1ULL << 32) - 1);
        } else {
            val = s->root;
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        val = s->root >> 32;
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        val = s->iq >> 32;
        break;

    default:
        if (size == 4) {
            val = vtd_get_long(s, addr);
        } else {
            val = vtd_get_quad(s, addr);
        }
    }
    VTD_DPRINTF(CSR, "addr 0x%"PRIx64 " size %d val 0x%"PRIx64,
                addr, size, val);
    return val;
}
static void vtd_mem_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    IntelIOMMUState *s = opaque;

    if (addr + size > DMAR_REG_SIZE) {
        VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
                    ", got 0x%"PRIx64 " %d",
                    (uint64_t)DMAR_REG_SIZE, addr, size);
        return;
    }

    switch (addr) {
    /* Global Command Register, 32-bit */
    case DMAR_GCMD_REG:
        VTD_DPRINTF(CSR, "DMAR_GCMD_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        vtd_set_long(s, addr, val);
        vtd_handle_gcmd_write(s);
        break;

    /* Context Command Register, 64-bit */
    case DMAR_CCMD_REG:
        VTD_DPRINTF(CSR, "DMAR_CCMD_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_ccmd_write(s);
        }
        break;

    case DMAR_CCMD_REG_HI:
        VTD_DPRINTF(CSR, "DMAR_CCMD_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ccmd_write(s);
        break;

    /* IOTLB Invalidation Register, 64-bit */
    case DMAR_IOTLB_REG:
        VTD_DPRINTF(INV, "DMAR_IOTLB_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_iotlb_write(s);
        }
        break;

    case DMAR_IOTLB_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IOTLB_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iotlb_write(s);
        break;

    /* Invalidate Address Register, 64-bit */
    case DMAR_IVA_REG:
        VTD_DPRINTF(INV, "DMAR_IVA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IVA_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IVA_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Status Register, 32-bit */
    case DMAR_FSTS_REG:
        VTD_DPRINTF(FLOG, "DMAR_FSTS_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fsts_write(s);
        break;

    /* Fault Event Control Register, 32-bit */
    case DMAR_FECTL_REG:
        VTD_DPRINTF(FLOG, "DMAR_FECTL_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fectl_write(s);
        break;

    /* Fault Event Data Register, 32-bit */
    case DMAR_FEDATA_REG:
        VTD_DPRINTF(FLOG, "DMAR_FEDATA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Address Register, 32-bit */
    case DMAR_FEADDR_REG:
        VTD_DPRINTF(FLOG, "DMAR_FEADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Upper Address Register, 32-bit */
    case DMAR_FEUADDR_REG:
        VTD_DPRINTF(FLOG, "DMAR_FEUADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Protected Memory Enable Register, 32-bit */
    case DMAR_PMEN_REG:
        VTD_DPRINTF(CSR, "DMAR_PMEN_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        VTD_DPRINTF(CSR, "DMAR_RTADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        VTD_DPRINTF(CSR, "DMAR_RTADDR_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Queue Tail Register, 64-bit */
    case DMAR_IQT_REG:
        VTD_DPRINTF(INV, "DMAR_IQT_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        vtd_handle_iqt_write(s);
        break;

    case DMAR_IQT_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IQT_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* 19:63 of IQT_REG is RsvdZ, do nothing here */
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        VTD_DPRINTF(INV, "DMAR_IQA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IQA_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IQA_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Completion Status Register, 32-bit */
    case DMAR_ICS_REG:
        VTD_DPRINTF(INV, "DMAR_ICS_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ics_write(s);
        break;

    /* Invalidation Event Control Register, 32-bit */
    case DMAR_IECTL_REG:
        VTD_DPRINTF(INV, "DMAR_IECTL_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iectl_write(s);
        break;

    /* Invalidation Event Data Register, 32-bit */
    case DMAR_IEDATA_REG:
        VTD_DPRINTF(INV, "DMAR_IEDATA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Address Register, 32-bit */
    case DMAR_IEADDR_REG:
        VTD_DPRINTF(INV, "DMAR_IEADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Upper Address Register, 32-bit */
    case DMAR_IEUADDR_REG:
        VTD_DPRINTF(INV, "DMAR_IEUADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Recording Registers, 128-bit */
    case DMAR_FRCD_REG_0_0:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_0 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_FRCD_REG_0_1:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_1 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    case DMAR_FRCD_REG_0_2:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_2 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            /* May clear bit 127 (Fault), update PPF */
            vtd_update_fsts_ppf(s);
        }
        break;

    case DMAR_FRCD_REG_0_3:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_3 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* May clear bit 127 (Fault), update PPF */
        vtd_update_fsts_ppf(s);
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: unhandled reg write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
    }
}
static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
                                         bool is_write)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if (!s->dmar_enabled) {
        /* DMAR disabled, passthrough, use 4k-page*/
        ret.iova = addr & VTD_PAGE_MASK_4K;
        ret.translated_addr = addr & VTD_PAGE_MASK_4K;
        ret.addr_mask = ~VTD_PAGE_MASK_4K;
        ret.perm = IOMMU_RW;
        return ret;
    }

    vtd_do_iommu_translate(vtd_as, vtd_as->bus_num, vtd_as->devfn, addr,
                           is_write, &ret);
    VTD_DPRINTF(MMU,
                "bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
                " gpa 0x%"PRIx64 " hpa 0x%"PRIx64, vtd_as->bus_num,
                VTD_PCI_SLOT(vtd_as->devfn), VTD_PCI_FUNC(vtd_as->devfn),
                vtd_as->devfn, addr, ret.translated_addr);
    return ret;
}
static const VMStateDescription vtd_vmstate = {
    .name = "iommu-intel",
};

static const MemoryRegionOps vtd_mem_ops = {
    .read = vtd_mem_read,
    .write = vtd_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static Property vtd_properties[] = {
    DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
    DEFINE_PROP_END_OF_LIST(),
};
/* Do the initialization. It will also be called when reset, so pay
 * attention when adding new initialization stuff.
 */
static void vtd_init(IntelIOMMUState *s)
{
    memset(s->csr, 0, DMAR_REG_SIZE);
    memset(s->wmask, 0, DMAR_REG_SIZE);
    memset(s->w1cmask, 0, DMAR_REG_SIZE);
    memset(s->womask, 0, DMAR_REG_SIZE);

    s->iommu_ops.translate = vtd_iommu_translate;
    s->root = 0;
    s->root_extended = false;
    s->dmar_enabled = false;
    s->iq_head = 0;
    s->iq_tail = 0;
    s->iq = 0;
    s->iq_size = 0;
    s->qi_enabled = false;
    s->iq_last_desc_type = VTD_INV_DESC_NONE;
    s->next_frcd_reg = 0;
    s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
             VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI;
    s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;

    vtd_reset_context_cache(s);
    vtd_reset_iotlb(s);

    /* Define registers with default values and bit semantics */
    vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
    vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
    vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
    vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
    vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
    vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
    vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
    vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);

    /* Advanced Fault Logging not supported */
    vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
    vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
    vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);

    /* Treated as RsvdZ when EIM in ECAP_REG is not supported
     * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
     */
    vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);

    /* Treated as RO for implementations that report the PLMR and PHMR fields
     * as Clear in the CAP_REG.
     * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
     */
    vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);

    vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
    vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
    vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
    vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
    vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
    vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);

    /* IOTLB registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
    vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
    vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);

    /* Fault Recording Registers, 128-bit */
    vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
    vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);
}
/* Should not reset address_spaces when reset because devices will still use
 * the address space they got at first (won't ask the bus again).
 */
static void vtd_reset(DeviceState *dev)
{
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);

    VTD_DPRINTF(GENERAL, "");
    vtd_init(s);
}

static void vtd_realize(DeviceState *dev, Error **errp)
{
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);

    VTD_DPRINTF(GENERAL, "");
    memset(s->address_spaces, 0, sizeof(s->address_spaces));
    memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                          "intel_iommu", DMAR_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
    /* No corresponding destroy */
    s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                     g_free, g_free);
    vtd_init(s);
}
static void vtd_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = vtd_reset;
    dc->realize = vtd_realize;
    dc->vmsd = &vtd_vmstate;
    dc->props = vtd_properties;
}

static const TypeInfo vtd_info = {
    .name          = TYPE_INTEL_IOMMU_DEVICE,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IntelIOMMUState),
    .class_init    = vtd_class_init,
};

static void vtd_register_types(void)
{
    VTD_DPRINTF(GENERAL, "");
    type_register_static(&vtd_info);
}

type_init(vtd_register_types)