/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 *   (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "kvm_i386.h"
#include "trace.h"
static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
                            uint64_t wmask, uint64_t w1cmask)
{
    stq_le_p(&s->csr[addr], val);
    stq_le_p(&s->wmask[addr], wmask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
{
    stq_le_p(&s->womask[addr], mask);
}

static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
                            uint32_t wmask, uint32_t w1cmask)
{
    stl_le_p(&s->csr[addr], val);
    stl_le_p(&s->wmask[addr], wmask);
    stl_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
{
    stl_le_p(&s->womask[addr], mask);
}
/* "External" get/set operations */
static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    uint64_t oldval = ldq_le_p(&s->csr[addr]);
    uint64_t wmask = ldq_le_p(&s->wmask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    stq_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}

static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
{
    uint32_t oldval = ldl_le_p(&s->csr[addr]);
    uint32_t wmask = ldl_le_p(&s->wmask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    stl_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}
static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
{
    uint64_t val = ldq_le_p(&s->csr[addr]);
    uint64_t womask = ldq_le_p(&s->womask[addr]);
    return val & ~womask;
}

static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
{
    uint32_t val = ldl_le_p(&s->csr[addr]);
    uint32_t womask = ldl_le_p(&s->womask[addr]);
    return val & ~womask;
}
/* "Internal" get/set operations */
static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldq_le_p(&s->csr[addr]);
}

static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldl_le_p(&s->csr[addr]);
}

static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->csr[addr], val);
}
static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
                                        uint32_t clear, uint32_t mask)
{
    uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
    stl_le_p(&s->csr[addr], new_val);
    return new_val;
}

static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
                                        uint64_t clear, uint64_t mask)
{
    uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
    stq_le_p(&s->csr[addr], new_val);
    return new_val;
}
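/*
 * Worked example of the masking scheme above (illustrative values):
 * take a register holding 0x35 whose wmask is 0x0f and whose w1cmask
 * is 0x10. A guest write of 0x13 keeps the read-only bit 5 (0x20),
 * replaces the writable low nibble with 0x3, and clears the W1C bit 4
 * because 1 was written to it:
 *
 *   ((0x35 & ~0x0f) | (0x13 & 0x0f)) & ~(0x10 & 0x13) == 0x23
 */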
/* GHashTable functions */
static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint vtd_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
                                          gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    uint16_t domain_id = *(uint16_t *)user_data;
    return entry->domain_id == domain_id;
}
/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
    return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}

static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
{
    return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
}
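/*
 * Example: with VTD_PAGE_SHIFT_4K == 12 and VTD_SL_LEVEL_BITS == 9
 * (the usual VT-d second-level values), vtd_slpt_level_shift() yields
 * 12 for level 1 (4K pages), 21 for level 2 (2M), 30 for level 3 (1G)
 * and 39 for level 4, matching x86 long-mode paging.
 */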
static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
                                        gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
    uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
    uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
    return (entry->domain_id == info->domain_id) &&
            (((entry->gfn & info->mask) == gfn) ||
             (entry->gfn == gfn_tlb));
}
/* Reset all the gen of VTDAddressSpace to zero and set the gen of
 * IntelIOMMUState to 1.
 */
static void vtd_reset_context_cache(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    VTDBus *vtd_bus;
    GHashTableIter bus_it;
    uint32_t devfn_it;

    trace_vtd_context_cache_reset();

    g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);

    while (g_hash_table_iter_next(&bus_it, NULL, (void **)&vtd_bus)) {
        for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (!vtd_as) {
                continue;
            }
            vtd_as->context_cache_entry.context_cache_gen = 0;
        }
    }
    s->context_cache_gen = 1;
}

static void vtd_reset_iotlb(IntelIOMMUState *s)
{
    assert(s->iotlb);
    g_hash_table_remove_all(s->iotlb);
}
static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
                                  uint32_t level)
{
    return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
           ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
}

static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
{
    return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
}
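/*
 * The IOTLB key above packs the gfn, the source id and the page-table
 * level into one uint64_t (assuming VTD_IOTLB_SID_SHIFT and
 * VTD_IOTLB_LVL_SHIFT place the fields in disjoint bit ranges), so a
 * single hash table can cache translations of every page size for
 * every device: the same gfn cached at level 1 and at level 2 hashes
 * to two distinct entries.
 */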
static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                       hwaddr addr)
{
    VTDIOTLBEntry *entry;
    uint64_t key;
    int level;

    for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
        key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
                                source_id, level);
        entry = g_hash_table_lookup(s->iotlb, &key);
        if (entry) {
            goto out;
        }
    }

out:
    return entry;
}
static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                             uint16_t domain_id, hwaddr addr, uint64_t slpte,
                             bool read_flags, bool write_flags,
                             uint32_t level)
{
    VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
    uint64_t *key = g_malloc(sizeof(*key));
    uint64_t gfn = vtd_get_iotlb_gfn(addr, level);

    trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
    if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
        trace_vtd_iotlb_reset("iotlb exceeds size limit");
        vtd_reset_iotlb(s);
    }

    entry->gfn = gfn;
    entry->domain_id = domain_id;
    entry->slpte = slpte;
    entry->read_flags = read_flags;
    entry->write_flags = write_flags;
    entry->mask = vtd_slpt_level_page_mask(level);
    *key = vtd_get_iotlb_key(gfn, source_id, level);
    g_hash_table_replace(s->iotlb, key, entry);
}
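/*
 * Note: g_hash_table_replace() hands ownership of both @key and @entry
 * to the hash table; assuming s->iotlb was created with g_free() as
 * key/value destroy functions, any entry displaced here (and the whole
 * table on reset) is freed automatically, so no explicit g_free() is
 * needed in this function.
 */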
/* Given the reg addr of both the message data and address, generate an
 * interrupt via MSI.
 */
static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
                                   hwaddr mesg_data_reg)
{
    MSIMessage msi;

    assert(mesg_data_reg < DMAR_REG_SIZE);
    assert(mesg_addr_reg < DMAR_REG_SIZE);

    msi.address = vtd_get_long_raw(s, mesg_addr_reg);
    msi.data = vtd_get_long_raw(s, mesg_data_reg);

    trace_vtd_irq_generate(msi.address, msi.data);

    apic_get_class()->send_msi(&msi);
}
/* Generate a fault event to software via MSI if conditions are met.
 * Notice that the value of FSTS_REG being passed to it should be the one
 * before any update.
 */
static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
{
    if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
        pre_fsts & VTD_FSTS_IQE) {
        trace_vtd_err("There are previous interrupt conditions "
                      "to be serviced by software, fault event "
                      "is not generated.");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
    if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
        trace_vtd_err("Interrupt Mask set, irq is not generated.");
    } else {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}
/* Check if the Fault (F) field of the Fault Recording Register referenced by
 * @index is Set.
 */
static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
}

/* Update the PPF field of Fault Status Register.
 * Should be called whenever the F field of any fault recording register
 * is changed.
 */
static void vtd_update_fsts_ppf(IntelIOMMUState *s)
{
    uint32_t i;
    uint32_t ppf_mask = 0;

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        if (vtd_is_frcd_set(s, i)) {
            ppf_mask = VTD_FSTS_PPF;
            break;
        }
    }
    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
    trace_vtd_fsts_ppf(!!ppf_mask);
}

static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
    vtd_update_fsts_ppf(s);
}
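/*
 * FRCD addressing used by the helpers above: each Fault Recording
 * Register is 128 bits wide, so register @index starts at
 * DMAR_FRCD_REG_OFFSET + index * 16, and the F bit lives in the high
 * 64-bit half - hence the "addr += 8" before the quad-word access.
 */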
/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
                            uint16_t source_id, hwaddr addr,
                            VTDFaultReason fault, bool is_write)
{
    uint64_t hi, lo;
    hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);

    assert(index < DMAR_FRCD_REG_NR);

    lo = VTD_FRCD_FI(addr);
    hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
    if (!is_write) {
        hi |= VTD_FRCD_T;
    }
    vtd_set_quad_raw(s, frcd_reg_addr, lo);
    vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);

    trace_vtd_frr_new(index, hi, lo);
}
/* Try to collapse multiple pending faults from the same requester */
static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
{
    uint32_t i;
    uint64_t frcd_reg;
    hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        frcd_reg = vtd_get_quad_raw(s, addr);
        if ((frcd_reg & VTD_FRCD_F) &&
            ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
            return true;
        }
        addr += 16; /* 128-bit for each */
    }
    return false;
}
/* Log and report a DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
                                  hwaddr addr, VTDFaultReason fault,
                                  bool is_write)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    assert(fault < VTD_FR_MAX);

    if (fault == VTD_FR_RESERVED_ERR) {
        /* This is not a normal fault reason case. Drop it. */
        return;
    }

    trace_vtd_dmar_fault(source_id, fault, addr, is_write);

    if (fsts_reg & VTD_FSTS_PFO) {
        trace_vtd_err("New fault is not recorded due to "
                      "Primary Fault Overflow.");
        return;
    }

    if (vtd_try_collapse_fault(s, source_id)) {
        trace_vtd_err("New fault is not recorded due to "
                      "compression of faults.");
        return;
    }

    if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
        trace_vtd_err("Next Fault Recording Reg is used, "
                      "new fault is not recorded, set PFO field.");
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
        return;
    }

    vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);

    if (fsts_reg & VTD_FSTS_PPF) {
        trace_vtd_err("There are pending faults already, "
                      "fault event is not generated.");
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
    } else {
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
                                VTD_FSTS_FRI(s->next_frcd_reg));
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
        /* This case actually causes the PPF to be Set.
         * So generate a fault event (interrupt).
         */
        vtd_generate_fault_event(s, fsts_reg);
    }
}
/* Handle Invalidation Queue Errors of queued invalidation interface error
 * conditions.
 */
static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
    vtd_generate_fault_event(s, fsts_reg);
}

/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
    if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
        trace_vtd_inv_desc_wait_irq("One pending, skip current");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
    vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
    if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
        trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
                                    "new event not generated");
        return;
    } else {
        /* Generate the interrupt event */
        trace_vtd_inv_desc_wait_irq("Generating complete event");
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}
static inline bool vtd_root_entry_present(VTDRootEntry *root)
{
    return root->val & VTD_ROOT_ENTRY_P;
}

static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
                              VTDRootEntry *re)
{
    dma_addr_t addr;

    addr = s->root + index * sizeof(*re);
    if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
        trace_vtd_re_invalid(re->rsvd, re->val);
        re->val = 0;
        return -VTD_FR_ROOT_TABLE_INV;
    }
    re->val = le64_to_cpu(re->val);
    return 0;
}
static inline bool vtd_ce_present(VTDContextEntry *context)
{
    return context->lo & VTD_CONTEXT_ENTRY_P;
}

static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
                                           VTDContextEntry *ce)
{
    dma_addr_t addr;

    /* we have checked that root entry is present */
    addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
    if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
        trace_vtd_re_invalid(root->rsvd, root->val);
        return -VTD_FR_CONTEXT_TABLE_INV;
    }
    ce->lo = le64_to_cpu(ce->lo);
    ce->hi = le64_to_cpu(ce->hi);
    return 0;
}
static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}

static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
{
    return slpte & VTD_SL_PT_BASE_ADDR_MASK;
}

/* Whether the pte indicates the address of the page frame */
static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
{
    return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
}

/* Get the content of a spte located in @base_addr[@index] */
static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
{
    uint64_t slpte;

    assert(index < VTD_SL_PT_ENTRY_NR);

    if (dma_memory_read(&address_space_memory,
                        base_addr + index * sizeof(slpte), &slpte,
                        sizeof(slpte))) {
        slpte = (uint64_t)-1;
        return slpte;
    }
    slpte = le64_to_cpu(slpte);
    return slpte;
}
/* Given an iova and the level of paging structure, return the offset
 * of current level.
 */
static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
    return (iova >> vtd_slpt_level_shift(level)) &
            ((1ULL << VTD_SL_LEVEL_BITS) - 1);
}

/* Check Capability Register to see if the @level of page-table is supported */
static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
{
    return VTD_CAP_SAGAW_MASK & s->cap &
           (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
}
/* Get the page-table level that hardware should use for the second-level
 * page-table walk from the Address Width field of context-entry.
 */
static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce)
{
    return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
}

static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce)
{
    return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
}

static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_TT;
}
/* Return true if check passed, otherwise false */
static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
                                     VTDContextEntry *ce)
{
    switch (vtd_ce_get_type(ce)) {
    case VTD_CONTEXT_TT_MULTI_LEVEL:
        /* Always supported */
        break;
    case VTD_CONTEXT_TT_DEV_IOTLB:
        if (!x86_iommu->dt_supported) {
            return false;
        }
        break;
    case VTD_CONTEXT_TT_PASS_THROUGH:
        if (!x86_iommu->pt_supported) {
            return false;
        }
        break;
    default:
        /* Unknown type */
        return false;
    }
    return true;
}

static inline uint64_t vtd_iova_limit(VTDContextEntry *ce)
{
    uint32_t ce_agaw = vtd_ce_get_agaw(ce);
    return 1ULL << MIN(ce_agaw, VTD_MGAW);
}
/* Return true if IOVA passes range check, otherwise false. */
static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce)
{
    /*
     * Check if @iova is above 2^X-1, where X is the minimum of MGAW
     * in CAP_REG and AW in context-entry.
     */
    return !(iova & ~(vtd_iova_limit(ce) - 1));
}

static const uint64_t vtd_paging_entry_rsvd_field[] = {
    [0] = ~0ULL,
    /* For not large page */
    [1] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [2] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [3] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [4] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    /* For large page */
    [5] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [6] = 0x1ff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [7] = 0x3ffff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [8] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
};
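/*
 * Index scheme of the table above: entries [1..4] are the reserved-bit
 * masks for ordinary PTEs at paging levels 1-4; entries [5..8] are the
 * masks for entries with the page-size (PS) bit set at the same levels.
 * E.g. a 2M mapping (PS set at level 2) is checked against
 * vtd_paging_entry_rsvd_field[2 + 4].
 */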
static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
{
    if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
        /* Maybe large page */
        return slpte & vtd_paging_entry_rsvd_field[level + 4];
    } else {
        return slpte & vtd_paging_entry_rsvd_field[level];
    }
}
/* Find the VTD address space associated with a given bus number */
static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
{
    VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
    if (!vtd_bus) {
        /*
         * Iterate over the registered buses to find the one which
         * currently holds this bus number, and update the bus_num
         * lookup table:
         */
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
            if (pci_bus_num(vtd_bus->bus) == bus_num) {
                s->vtd_as_by_bus_num[bus_num] = vtd_bus;
                return vtd_bus;
            }
        }
    }
    return vtd_bus;
}
/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
 * of the translation, can be used for deciding the size of large page.
 */
static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
                             uint64_t *slptep, uint32_t *slpte_level,
                             bool *reads, bool *writes)
{
    dma_addr_t addr = vtd_ce_get_slpt_base(ce);
    uint32_t level = vtd_ce_get_level(ce);
    uint32_t offset;
    uint64_t slpte;
    uint64_t access_right_check;

    if (!vtd_iova_range_check(iova, ce)) {
        trace_vtd_err_dmar_iova_overflow(iova);
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    /* FIXME: what is the Atomics request here? */
    access_right_check = is_write ? VTD_SL_W : VTD_SL_R;

    while (true) {
        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_err_dmar_slpte_read_error(iova, level);
            if (level == vtd_ce_get_level(ce)) {
                /* Invalid programming of context-entry */
                return -VTD_FR_CONTEXT_ENTRY_INV;
            } else {
                return -VTD_FR_PAGING_ENTRY_INV;
            }
        }
        *reads = (*reads) && (slpte & VTD_SL_R);
        *writes = (*writes) && (slpte & VTD_SL_W);
        if (!(slpte & access_right_check)) {
            trace_vtd_err_dmar_slpte_perm_error(iova, level, slpte, is_write);
            return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
        }
        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_err_dmar_slpte_resv_error(iova, level, slpte);
            return -VTD_FR_PAGING_ENTRY_RSVD;
        }

        if (vtd_is_last_slpte(slpte, level)) {
            *slptep = slpte;
            *slpte_level = level;
            return 0;
        }
        addr = vtd_get_slpte_addr(slpte);
        level--;
    }
}
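/*
 * Illustration (assuming a context entry with a 48-bit AGAW, i.e. a
 * 4-level table): the walk above starts at level 4, takes a 9-bit
 * index per level via vtd_iova_level_offset(), reads the entry with
 * vtd_get_slpte(), accumulates R/W permission bits downward, and
 * terminates either at a level-1 4K leaf or earlier at an entry whose
 * page-size bit is set (a 2M or 1G mapping).
 */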
typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
/**
 * vtd_page_walk_level - walk over specific level for IOVA range
 *
 * @addr: base GPA addr to start the walk
 * @start: IOVA range start address
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: hook func to be called on each detected page
 * @private: private data to be passed into hook func
 * @read: whether parent level has read permission
 * @write: whether parent level has write permission
 * @notify_unmap: whether we should notify invalid entries
 */
static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
                               uint64_t end, vtd_page_walk_hook hook_fn,
                               void *private, uint32_t level,
                               bool read, bool write, bool notify_unmap)
{
    bool read_cur, write_cur, entry_valid;
    uint32_t offset;
    uint64_t slpte;
    uint64_t subpage_size, subpage_mask;
    IOMMUTLBEntry entry;
    uint64_t iova = start;
    uint64_t iova_next;
    int ret = 0;

    trace_vtd_page_walk_level(addr, level, start, end);

    subpage_size = 1ULL << vtd_slpt_level_shift(level);
    subpage_mask = vtd_slpt_level_page_mask(level);

    while (iova < end) {
        iova_next = (iova & subpage_mask) + subpage_size;

        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_page_walk_skip_read(iova, iova_next);
            goto next;
        }

        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_page_walk_skip_reserve(iova, iova_next);
            goto next;
        }

        /* Permissions are stacked with parents' */
        read_cur = read && (slpte & VTD_SL_R);
        write_cur = write && (slpte & VTD_SL_W);

        /*
         * As long as we have either read/write permission, this is a
         * valid entry. The rule works for both page entries and page
         * directory entries.
         */
        entry_valid = read_cur | write_cur;

        if (vtd_is_last_slpte(slpte, level)) {
            entry.target_as = &address_space_memory;
            entry.iova = iova & subpage_mask;
            /* NOTE: this is only meaningful if entry_valid == true */
            entry.translated_addr = vtd_get_slpte_addr(slpte);
            entry.addr_mask = ~subpage_mask;
            entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
            if (!entry_valid && !notify_unmap) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            trace_vtd_page_walk_one(level, entry.iova, entry.translated_addr,
                                    entry.addr_mask, entry.perm);
            ret = hook_fn(&entry, private);
            if (ret < 0) {
                return ret;
            }
        } else {
            if (!entry_valid) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte), iova,
                                      MIN(iova_next, end), hook_fn, private,
                                      level - 1, read_cur, write_cur,
                                      notify_unmap);
            if (ret < 0) {
                return ret;
            }
        }
next:
        iova = iova_next;
    }

    return 0;
}
/**
 * vtd_page_walk - walk specific IOVA range, and call the hook
 *
 * @ce: context entry to walk upon
 * @start: IOVA address to start the walk
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: the hook to be called for each detected area
 * @private: private data for the hook function
 */
static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
                         vtd_page_walk_hook hook_fn, void *private,
                         bool notify_unmap)
{
    dma_addr_t addr = vtd_ce_get_slpt_base(ce);
    uint32_t level = vtd_ce_get_level(ce);

    if (!vtd_iova_range_check(start, ce)) {
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    if (!vtd_iova_range_check(end, ce)) {
        /* Fix end so that it reaches the maximum */
        end = vtd_iova_limit(ce);
    }

    return vtd_page_walk_level(addr, start, end, hook_fn, private,
                               level, true, true, notify_unmap);
}
/* Map a device to its corresponding domain (context-entry) */
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
                                    uint8_t devfn, VTDContextEntry *ce)
{
    VTDRootEntry re;
    int ret_fr;
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    ret_fr = vtd_get_root_entry(s, bus_num, &re);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_root_entry_present(&re)) {
        /* Not error - it's okay we don't have root entry. */
        trace_vtd_re_not_present(bus_num);
        return -VTD_FR_ROOT_ENTRY_P;
    }

    if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
        trace_vtd_re_invalid(re.rsvd, re.val);
        return -VTD_FR_ROOT_ENTRY_RSVD;
    }

    ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_ce_present(ce)) {
        /* Not error - it's okay we don't have context entry. */
        trace_vtd_ce_not_present(bus_num, devfn);
        return -VTD_FR_CONTEXT_ENTRY_P;
    }

    if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
        (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }

    /* Check if the programming of context-entry is valid */
    if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    /* Do translation type check */
    if (!vtd_ce_type_check(x86_iommu, ce)) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    return 0;
}
/*
 * Fetch translation type for specific device. Returns <0 if error
 * happens, otherwise return the shifted type to check against
 * `translation'.
 */
static int vtd_dev_get_trans_type(VTDAddressSpace *as)
{
    IntelIOMMUState *s;
    VTDContextEntry ce;
    int ret;

    s = as->iommu_state;

    ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus),
                                   as->devfn, &ce);
    if (ret) {
        return ret;
    }

    return vtd_ce_get_type(&ce);
}

static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
{
    int ret;

    assert(as);

    ret = vtd_dev_get_trans_type(as);
    if (ret < 0) {
        /*
         * Possibly failed to parse the context entry for some reason
         * (e.g., during init, or any guest configuration errors on
         * context entries). We should assume PT not enabled for
         * safety.
         */
        return false;
    }

    return ret == VTD_CONTEXT_TT_PASS_THROUGH;
}
/* Return whether the device is using IOMMU translation. */
static bool vtd_switch_address_space(VTDAddressSpace *as)
{
    bool use_iommu;

    assert(as);

    use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as);

    trace_vtd_switch_address_space(pci_bus_num(as->bus),
                                   VTD_PCI_SLOT(as->devfn),
                                   VTD_PCI_FUNC(as->devfn),
                                   use_iommu);

    /* Turn off first then on the other */
    if (use_iommu) {
        memory_region_set_enabled(&as->sys_alias, false);
        memory_region_set_enabled(&as->iommu, true);
    } else {
        memory_region_set_enabled(&as->iommu, false);
        memory_region_set_enabled(&as->sys_alias, true);
    }

    return use_iommu;
}

static void vtd_switch_address_space_all(IntelIOMMUState *s)
{
    GHashTableIter iter;
    VTDBus *vtd_bus;
    int i;

    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
        for (i = 0; i < X86_IOMMU_PCI_DEVFN_MAX; i++) {
            if (!vtd_bus->dev_as[i]) {
                continue;
            }
            vtd_switch_address_space(vtd_bus->dev_as[i]);
        }
    }
}
static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
{
    return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
}
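/*
 * E.g. PCI device 02:03.4 (bus 0x02, devfn 0x1c) gets source id
 * (0x02 << 8) | 0x1c == 0x021c - the requester id format used by the
 * fault recording and invalidation code above.
 */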
static const bool vtd_qualified_faults[] = {
    [VTD_FR_RESERVED] = false,
    [VTD_FR_ROOT_ENTRY_P] = false,
    [VTD_FR_CONTEXT_ENTRY_P] = true,
    [VTD_FR_CONTEXT_ENTRY_INV] = true,
    [VTD_FR_ADDR_BEYOND_MGAW] = true,
    [VTD_FR_WRITE] = true,
    [VTD_FR_READ] = true,
    [VTD_FR_PAGING_ENTRY_INV] = true,
    [VTD_FR_ROOT_TABLE_INV] = false,
    [VTD_FR_CONTEXT_TABLE_INV] = false,
    [VTD_FR_ROOT_ENTRY_RSVD] = false,
    [VTD_FR_PAGING_ENTRY_RSVD] = true,
    [VTD_FR_CONTEXT_ENTRY_TT] = true,
    [VTD_FR_RESERVED_ERR] = false,
    [VTD_FR_MAX] = false,
};

/* To see if a fault condition is "qualified", which is reported to software
 * only if the FPD field in the context-entry used to process the faulting
 * request is 0.
 */
static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
{
    return vtd_qualified_faults[fault];
}
static inline bool vtd_is_interrupt_addr(hwaddr addr)
{
    return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
}
static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
{
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    bool success = false;

    vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
    if (!vtd_bus) {
        goto out;
    }

    vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)];
    if (!vtd_as) {
        goto out;
    }

    if (vtd_switch_address_space(vtd_as) == false) {
        /* We switched off IOMMU region successfully. */
        success = true;
    }

out:
    trace_vtd_pt_enable_fast_path(source_id, success);
}
/* Map dev to context-entry then do a paging-structures walk to do an iommu
 * translation.
 *
 * Called from RCU critical section.
 *
 * @bus_num: The bus number
 * @devfn: The devfn, which is the combination of device and function number
 * @is_write: The access is a write operation
 * @entry: IOMMUTLBEntry that contains the addr to be translated and result
 *
 * Returns true if translation is successful, otherwise false.
 */
static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
                                   uint8_t devfn, hwaddr addr, bool is_write,
                                   IOMMUTLBEntry *entry)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    VTDContextEntry ce;
    uint8_t bus_num = pci_bus_num(bus);
    VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
    uint64_t slpte, page_mask;
    uint32_t level;
    uint16_t source_id = vtd_make_source_id(bus_num, devfn);
    int ret_fr;
    bool is_fpd_set = false;
    bool reads = true;
    bool writes = true;
    VTDIOTLBEntry *iotlb_entry;

    /*
     * We have standalone memory region for interrupt addresses, we
     * should never receive translation requests in this region.
     */
    assert(!vtd_is_interrupt_addr(addr));

    /* Try to fetch slpte from IOTLB */
    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
    if (iotlb_entry) {
        trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
                                 iotlb_entry->domain_id);
        slpte = iotlb_entry->slpte;
        reads = iotlb_entry->read_flags;
        writes = iotlb_entry->write_flags;
        page_mask = iotlb_entry->mask;
        goto out;
    }

    /* Try to fetch context-entry from cache first */
    if (cc_entry->context_cache_gen == s->context_cache_gen) {
        trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
                               cc_entry->context_entry.lo,
                               cc_entry->context_cache_gen);
        ce = cc_entry->context_entry;
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
    } else {
        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (ret_fr) {
            ret_fr = -ret_fr;
            if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
                trace_vtd_fault_disabled();
            } else {
                vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
            }
            goto error;
        }
        /* Update context-cache */
        trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
                                  cc_entry->context_cache_gen,
                                  s->context_cache_gen);
        cc_entry->context_entry = ce;
        cc_entry->context_cache_gen = s->context_cache_gen;
    }

    /*
     * We don't need to translate for pass-through context entries.
     * Also, let's ignore IOTLB caching as well for PT devices.
     */
    if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
        entry->iova = addr & VTD_PAGE_MASK;
        entry->translated_addr = entry->iova;
        entry->addr_mask = VTD_PAGE_MASK;
        entry->perm = IOMMU_RW;
        trace_vtd_translate_pt(source_id, entry->iova);

        /*
         * When this happens, it means firstly caching-mode is not
         * enabled, and this is the first passthrough translation for
         * the device. Let's enable the fast path for passthrough.
         *
         * When passthrough is disabled again for the device, we can
         * capture it via the context entry invalidation, then the
         * IOMMU region can be swapped back.
         */
        vtd_pt_enable_fast_path(s, source_id);

        return true;
    }

    ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
                               &reads, &writes);
    if (ret_fr) {
        ret_fr = -ret_fr;
        if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
            trace_vtd_fault_disabled();
        } else {
            vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
        }
        goto error;
    }

    page_mask = vtd_slpt_level_page_mask(level);
    vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
                     reads, writes, level);
out:
    entry->iova = addr & page_mask;
    entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
    entry->addr_mask = ~page_mask;
    entry->perm = IOMMU_ACCESS_FLAG(reads, writes);
    return true;

error:
    entry->iova = 0;
    entry->translated_addr = 0;
    entry->addr_mask = 0;
    entry->perm = IOMMU_NONE;
    return false;
}
static void vtd_root_table_setup(IntelIOMMUState *s)
{
    s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
    s->root_extended = s->root & VTD_RTADDR_RTT;
    s->root &= VTD_RTADDR_ADDR_MASK;

    trace_vtd_reg_dmar_root(s->root, s->root_extended);
}

static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
                               uint32_t index, uint32_t mask)
{
    x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
}
static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
{
    uint64_t value;

    value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
    s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
    s->intr_root = value & VTD_IRTA_ADDR_MASK;
    s->intr_eime = value & VTD_IRTA_EIME;

    /* Notify global invalidation */
    vtd_iec_notify_all(s, true, 0, 0);

    trace_vtd_reg_ir_root(s->intr_root, s->intr_size);
}
static void vtd_iommu_replay_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        memory_region_iommu_replay_all(&node->vtd_as->iommu);
    }
}
static void vtd_context_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_cc_global();
    s->context_cache_gen++;
    if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
        vtd_reset_context_cache(s);
    }
    vtd_switch_address_space_all(s);
    /*
     * From VT-d spec 6.5.2.1, a global context entry invalidation
     * should be followed by a IOTLB global invalidation, so we should
     * be safe even without this. However, let's replay the region as
     * well to be safer, and go back here when we need finer tunes for
     * VT-d emulation codes.
     */
    vtd_iommu_replay_all(s);
}
/* Do a context-cache device-selective invalidation.
 * @func_mask: FM field after shifting
 */
static void vtd_context_device_invalidate(IntelIOMMUState *s,
                                          uint16_t source_id,
                                          uint16_t func_mask)
{
    uint16_t mask;
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    uint8_t bus_n, devfn;
    uint16_t devfn_it;

    trace_vtd_inv_desc_cc_devices(source_id, func_mask);

    switch (func_mask & 3) {
    case 0:
        mask = 0;   /* No bits in the SID field masked */
        break;
    case 1:
        mask = 4;   /* Mask bit 2 in the SID field */
        break;
    case 2:
        mask = 6;   /* Mask bit 2:1 in the SID field */
        break;
    case 3:
        mask = 7;   /* Mask bit 2:0 in the SID field */
        break;
    }
    mask = ~mask;

    bus_n = VTD_SID_TO_BUS(source_id);
    vtd_bus = vtd_find_as_from_bus_num(s, bus_n);
    if (vtd_bus) {
        devfn = VTD_SID_TO_DEVFN(source_id);
        for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
                trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
                                             VTD_PCI_FUNC(devfn_it));
                vtd_as->context_cache_entry.context_cache_gen = 0;
                /*
                 * Do switch address space when needed, in case the
                 * device passthrough bit is switched.
                 */
                vtd_switch_address_space(vtd_as);
                /*
                 * So a device is moving out of (or moving into) a
                 * domain, a replay() suits here to notify all the
                 * IOMMU_NOTIFIER_MAP registers about this change.
                 * This does no harm even if we have no such
                 * notifier registered - the IOMMU notification
                 * framework will skip MAP notifications if that
                 * happened.
                 */
                memory_region_iommu_replay_all(&vtd_as->iommu);
            }
        }
    }
}
/* Context-cache invalidation
 * Returns the Context Actual Invalidation Granularity.
 * @val: the content of the CCMD_REG
 */
static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
{
    uint64_t caig;
    uint64_t type = val & VTD_CCMD_CIRG_MASK;

    switch (type) {
    case VTD_CCMD_DOMAIN_INVL:
        /* Fall through */
    case VTD_CCMD_GLOBAL_INVL:
        caig = VTD_CCMD_GLOBAL_INVL_A;
        vtd_context_global_invalidate(s);
        break;

    case VTD_CCMD_DEVICE_INVL:
        caig = VTD_CCMD_DEVICE_INVL_A;
        vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
        break;

    default:
        trace_vtd_err("Context cache invalidate type error.");
        caig = 0;
    }
    return caig;
}
static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_iotlb_global();
    vtd_reset_iotlb(s);
    vtd_iommu_replay_all(s);
}
static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
{
    IntelIOMMUNotifierNode *node;
    VTDContextEntry ce;
    VTDAddressSpace *vtd_as;

    trace_vtd_inv_desc_iotlb_domain(domain_id);

    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
                                &domain_id);

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        vtd_as = node->vtd_as;
        if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                      vtd_as->devfn, &ce) &&
            domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
            memory_region_iommu_replay_all(&vtd_as->iommu);
        }
    }
}
static int vtd_page_invalidate_notify_hook(IOMMUTLBEntry *entry,
                                           void *private)
{
    memory_region_notify_iommu((MemoryRegion *)private, *entry);
    return 0;
}

static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
                                             uint16_t domain_id, hwaddr addr,
                                             uint8_t am)
{
    IntelIOMMUNotifierNode *node;
    VTDContextEntry ce;
    int ret;

    QLIST_FOREACH(node, &(s->notifiers_list), next) {
        VTDAddressSpace *vtd_as = node->vtd_as;
        ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                       vtd_as->devfn, &ce);
        if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
            vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE,
                          vtd_page_invalidate_notify_hook,
                          (void *)&vtd_as->iommu, true);
        }
    }
}
static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
                                      hwaddr addr, uint8_t am)
{
    VTDIOTLBPageInvInfo info;

    trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);

    assert(am <= VTD_MAMV);
    info.domain_id = domain_id;
    info.addr = addr;
    info.mask = ~((1 << am) - 1);
    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
    vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
}
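/*
 * @am is the VT-d address mask: the request covers 2^am contiguous 4K
 * pages starting at @addr. E.g. am == 2 gives info.mask == ~3ULL, so
 * vtd_hash_remove_by_page() drops the four gfns sharing those high
 * bits, and its entry->mask comparison also catches a larger cached
 * page overlapping the range.
 */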
/* Flush IOTLB
 * Returns the IOTLB Actual Invalidation Granularity.
 * @val: the content of the IOTLB_REG
 */
static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
{
    uint64_t iaig;
    uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
    uint16_t domain_id;
    hwaddr addr;
    uint8_t am;

    switch (type) {
    case VTD_TLB_GLOBAL_FLUSH:
        iaig = VTD_TLB_GLOBAL_FLUSH_A;
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_TLB_DSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        iaig = VTD_TLB_DSI_FLUSH_A;
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_TLB_PSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
        am = VTD_IVA_AM(addr);
        addr = VTD_IVA_ADDR(addr);
        if (am > VTD_MAMV) {
            trace_vtd_err("IOTLB PSI flush: address mask overflow.");
            iaig = 0;
            break;
        }
        iaig = VTD_TLB_PSI_FLUSH_A;
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        trace_vtd_err("IOTLB flush: invalid granularity.");
        iaig = 0;
    }
    return iaig;
}
static void vtd_fetch_inv_desc(IntelIOMMUState *s);

static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
{
    return s->qi_enabled && (s->iq_tail == s->iq_head) &&
           (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
}
static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
{
    uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);

    trace_vtd_inv_qi_enable(en);

    if (en) {
        s->iq = iqa_val & VTD_IQA_IQA_MASK;
        /* 2^(x+8) entries */
        s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
        s->qi_enabled = true;
        trace_vtd_inv_qi_setup(s->iq, s->iq_size);
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);

        if (s->iq_tail != 0) {
            /*
             * This is a spec violation but Windows guests are known to set up
             * Queued Invalidation this way so we allow the write and process
             * Invalidation Descriptors right away.
             */
            trace_vtd_warn_invalid_qi_tail(s->iq_tail);
            if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
                vtd_fetch_inv_desc(s);
            }
        }
    } else {
        if (vtd_queued_inv_disable_check(s)) {
            /* disable Queued Invalidation */
            vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
            s->iq_head = 0;
            s->qi_enabled = false;
            /* Ok - report back to driver */
            vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
        } else {
            trace_vtd_err_qi_disable(s->iq_head, s->iq_tail,
                                     s->iq_last_desc_type);
        }
    }
}
/* Set Root Table Pointer */
static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
{
    vtd_root_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
}

/* Set Interrupt Remap Table Pointer */
static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
{
    vtd_interrupt_remap_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
}
/* Handle Translation Enable/Disable */
static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
{
    if (s->dmar_enabled == en) {
        return;
    }

    trace_vtd_dmar_enable(en);

    if (en) {
        s->dmar_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
    } else {
        s->dmar_enabled = false;

        /* Clear the index of Fault Recording Register */
        s->next_frcd_reg = 0;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
    }

    vtd_switch_address_space_all(s);
}

/* Handle Interrupt Remap Enable/Disable */
static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
{
    trace_vtd_ir_enable(en);

    if (en) {
        s->intr_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
    } else {
        s->intr_enabled = false;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
    }
}
/* Handle write to Global Command Register */
static void vtd_handle_gcmd_write(IntelIOMMUState *s)
{
    uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
    uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
    uint32_t changed = status ^ val;

    trace_vtd_reg_write_gcmd(status, val);
    if (changed & VTD_GCMD_TE) {
        /* Translation enable/disable */
        vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
    }
    if (val & VTD_GCMD_SRTP) {
        /* Set/update the root-table pointer */
        vtd_handle_gcmd_srtp(s);
    }
    if (changed & VTD_GCMD_QIE) {
        /* Queued Invalidation Enable */
        vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
    }
    if (val & VTD_GCMD_SIRTP) {
        /* Set/update the interrupt remapping root-table pointer */
        vtd_handle_gcmd_sirtp(s);
    }
    if (changed & VTD_GCMD_IRE) {
        /* Interrupt remap enable/disable */
        vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
    }
}
/* Handle write to Context Command Register */
static void vtd_handle_ccmd_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);

    /* Context-cache invalidation request */
    if (val & VTD_CCMD_ICC) {
        if (s->qi_enabled) {
            trace_vtd_err("Queued Invalidation enabled, "
                          "should not use register-based invalidation");
            return;
        }
        ret = vtd_context_cache_invalidate(s, val);
        /* Invalidation completed. Change something to show */
        vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
                                      ret);
    }
}

/* Handle write to IOTLB Invalidation Register */
static void vtd_handle_iotlb_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);

    /* IOTLB invalidation request */
    if (val & VTD_TLB_IVT) {
        if (s->qi_enabled) {
            trace_vtd_err("Queued Invalidation enabled, "
                          "should not use register-based invalidation.");
            return;
        }
        ret = vtd_iotlb_flush(s, val);
        /* Invalidation completed. Change something to show */
        vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
                                      VTD_TLB_FLUSH_GRANU_MASK_A, ret);
    }
}
/* Fetch an Invalidation Descriptor from the Invalidation Queue */
static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
                             VTDInvDesc *inv_desc)
{
    dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
    if (dma_memory_read(&address_space_memory, addr, inv_desc,
                        sizeof(*inv_desc))) {
        trace_vtd_err("Read INV DESC failed.");
        inv_desc->lo = 0;
        inv_desc->hi = 0;
        return false;
    }
    inv_desc->lo = le64_to_cpu(inv_desc->lo);
    inv_desc->hi = le64_to_cpu(inv_desc->hi);
    return true;
}
static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
        (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
        trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
        /* Status Write */
        uint32_t status_data = (uint32_t)(inv_desc->lo >>
                                          VTD_INV_DESC_WAIT_DATA_SHIFT);

        assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));

        /* FIXME: need to be masked with HAW? */
        dma_addr_t status_addr = inv_desc->hi;
        trace_vtd_inv_desc_wait_sw(status_addr, status_data);
        status_data = cpu_to_le32(status_data);
        if (dma_memory_write(&address_space_memory, status_addr, &status_data,
                             sizeof(status_data))) {
            trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
            return false;
        }
    } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
        /* Interrupt flag */
        vtd_generate_completion_event(s);
    } else {
        trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}
static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
                                           VTDInvDesc *inv_desc)
{
    uint16_t sid, fmask;

    if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
        trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
    case VTD_INV_DESC_CC_DOMAIN:
        trace_vtd_inv_desc_cc_domain(
            (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
        /* Fall through */
    case VTD_INV_DESC_CC_GLOBAL:
        vtd_context_global_invalidate(s);
        break;

    case VTD_INV_DESC_CC_DEVICE:
        sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
        fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
        vtd_context_device_invalidate(s, sid, fmask);
        break;

    default:
        trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}
static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    uint16_t domain_id;
    uint8_t am;
    hwaddr addr;

    if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }

    switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
    case VTD_INV_DESC_IOTLB_GLOBAL:
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_INV_DESC_IOTLB_DOMAIN:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_INV_DESC_IOTLB_PAGE:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
        am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
        if (am > VTD_MAMV) {
            trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
            return false;
        }
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}
static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
                                     VTDInvDesc *inv_desc)
{
    trace_vtd_inv_desc_iec(inv_desc->iec.granularity,
                           inv_desc->iec.index,
                           inv_desc->iec.index_mask);

    vtd_iec_notify_all(s, !inv_desc->iec.granularity,
                       inv_desc->iec.index,
                       inv_desc->iec.index_mask);
    return true;
}
static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
                                          VTDInvDesc *inv_desc)
{
    VTDAddressSpace *vtd_dev_as;
    IOMMUTLBEntry entry;
    struct VTDBus *vtd_bus;
    hwaddr addr;
    uint64_t sz;
    uint16_t sid;
    uint8_t devfn;
    bool size;
    uint8_t bus_num;

    addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
    sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
    devfn = sid & 0xff;
    bus_num = sid >> 8;
    size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);

    if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }

    vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
    if (!vtd_bus) {
        goto done;
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];
    if (!vtd_dev_as) {
        goto done;
    }

    /* According to ATS spec table 2.4:
     * S = 0, bits 15:12 = xxxx     range size: 4K
     * S = 1, bits 15:12 = xxx0     range size: 8K
     * S = 1, bits 15:12 = xx01     range size: 16K
     * S = 1, bits 15:12 = x011     range size: 32K
     * S = 1, bits 15:12 = 0111     range size: 64K
     * ...
     */
    if (size) {
        sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
        addr &= ~(sz - 1);
    } else {
        sz = VTD_PAGE_SIZE;
    }

    entry.target_as = &vtd_dev_as->as;
    entry.addr_mask = sz - 1;
    entry.iova = addr;
    entry.perm = IOMMU_NONE;
    entry.translated_addr = 0;
    memory_region_notify_iommu(&vtd_dev_as->iommu, entry);

done:
    return true;
}
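/*
 * Size decoding example for the ATS table above: with S == 1 and
 * address bits 15:12 == x011, addr >> VTD_PAGE_SHIFT ends in binary
 * ...011, cto64() (count of trailing ones) returns 2, and
 * sz = (VTD_PAGE_SIZE * 2) << 2 == 32K; addr is then aligned down to
 * that size.
 */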
static bool vtd_process_inv_desc(IntelIOMMUState *s)
{
    VTDInvDesc inv_desc;
    uint8_t desc_type;

    trace_vtd_inv_qi_head(s->iq_head);
    if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
        s->iq_last_desc_type = VTD_INV_DESC_NONE;
        return false;
    }
    desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
    /* FIXME: should update at first or at last? */
    s->iq_last_desc_type = desc_type;

    switch (desc_type) {
    case VTD_INV_DESC_CC:
        trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_context_cache_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IOTLB:
        trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_WAIT:
        trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_wait_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IEC:
        trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_DEVICE:
        trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    default:
        trace_vtd_inv_desc_invalid(inv_desc.hi, inv_desc.lo);
        return false;
    }
    s->iq_head++;
    if (s->iq_head == s->iq_size) {
        s->iq_head = 0;
    }
    return true;
}
/* Try to fetch and process more Invalidation Descriptors */
static void vtd_fetch_inv_desc(IntelIOMMUState *s)
{
    trace_vtd_inv_qi_fetch();

    if (s->iq_tail >= s->iq_size) {
        /* Detects an invalid Tail pointer */
        trace_vtd_err_qi_tail(s->iq_tail, s->iq_size);
        vtd_handle_inv_queue_error(s);
        return;
    }
    while (s->iq_head != s->iq_tail) {
        if (!vtd_process_inv_desc(s)) {
            /* Invalidation Queue Errors */
            vtd_handle_inv_queue_error(s);
            break;
        }
        /* Must update the IQH_REG in time */
        vtd_set_quad_raw(s, DMAR_IQH_REG,
                         (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
                         VTD_IQH_QH_MASK);
    }
}
/* Handle write to Invalidation Queue Tail Register */
static void vtd_handle_iqt_write(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);

    s->iq_tail = VTD_IQT_QT(val);
    trace_vtd_inv_qi_tail(s->iq_tail);

    if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
        /* Process Invalidation Queue here */
        vtd_fetch_inv_desc(s);
    }
}
static void vtd_handle_fsts_write(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
    uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;

    if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        trace_vtd_fsts_clear_ip();
    }
    /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
     * Descriptors if there are any when Queued Invalidation is enabled?
     */
}
static void vtd_handle_fectl_write(IntelIOMMUState *s)
{
    uint32_t fectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);

    trace_vtd_reg_write_fectl(fectl_reg);

    if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}
static void vtd_handle_ics_write(IntelIOMMUState *s)
{
    uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
    uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
        trace_vtd_reg_ics_clear_ip();
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}
static void vtd_handle_iectl_write(IntelIOMMUState *s)
{
    uint32_t iectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    trace_vtd_reg_write_iectl(iectl_reg);

    if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}
static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    IntelIOMMUState *s = opaque;
    uint64_t val;

    trace_vtd_reg_read(addr, size);

    if (addr + size > DMAR_REG_SIZE) {
        trace_vtd_err("Read MMIO over range.");
        return (uint64_t)-1;
    }

    switch (addr) {
    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            val = s->root & ((1ULL << 32) - 1);
        } else {
            val = s->root;
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        val = s->root >> 32;
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        val = s->iq >> 32;
        break;

    default:
        if (size == 4) {
            val = vtd_get_long(s, addr);
        } else {
            val = vtd_get_quad(s, addr);
        }
    }

    return val;
}
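/*
 * The MMIO handlers above and below split 64-bit registers into two
 * 32-bit halves because guests may access DMAR registers with either
 * width: the *_HI cases service 4-byte reads/writes of the upper half,
 * while 8-byte accesses go through vtd_get_quad()/vtd_set_quad()
 * directly.
 */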
2035 static void vtd_mem_write(void *opaque
, hwaddr addr
,
2036 uint64_t val
, unsigned size
)
2038 IntelIOMMUState
*s
= opaque
;
2040 trace_vtd_reg_write(addr
, size
, val
);
2042 if (addr
+ size
> DMAR_REG_SIZE
) {
2043 trace_vtd_err("Write MMIO over range.");
    /* Global Command Register, 32-bit */
    case DMAR_GCMD_REG:
        vtd_set_long(s, addr, val);
        vtd_handle_gcmd_write(s);
        break;

    /* Context Command Register, 64-bit */
    case DMAR_CCMD_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_ccmd_write(s);
        }
        break;

    case DMAR_CCMD_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ccmd_write(s);
        break;

    /* IOTLB Invalidation Register, 64-bit */
    case DMAR_IOTLB_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_iotlb_write(s);
        }
        break;

    case DMAR_IOTLB_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iotlb_write(s);
        break;

    /* Invalidate Address Register, 64-bit */
    case DMAR_IVA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IVA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Status Register, 32-bit */
    case DMAR_FSTS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fsts_write(s);
        break;

    /* Fault Event Control Register, 32-bit */
    case DMAR_FECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fectl_write(s);
        break;

    /* Fault Event Data Register, 32-bit */
    case DMAR_FEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Address Register, 32-bit */
    case DMAR_FEADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Upper Address Register, 32-bit */
    case DMAR_FEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Protected Memory Enable Register, 32-bit */
    case DMAR_PMEN_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Queue Tail Register, 64-bit */
    case DMAR_IQT_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        vtd_handle_iqt_write(s);
        break;

    case DMAR_IQT_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* 19:63 of IQT_REG is RsvdZ, do nothing here */
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Completion Status Register, 32-bit */
    case DMAR_ICS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ics_write(s);
        break;

    /* Invalidation Event Control Register, 32-bit */
    case DMAR_IECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iectl_write(s);
        break;

    /* Invalidation Event Data Register, 32-bit */
    case DMAR_IEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Address Register, 32-bit */
    case DMAR_IEADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Upper Address Register, 32-bit */
    case DMAR_IEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Recording Registers, 128-bit */
    case DMAR_FRCD_REG_0_0:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_FRCD_REG_0_1:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    case DMAR_FRCD_REG_0_2:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            /* May clear bit 127 (Fault), update PPF */
            vtd_update_fsts_ppf(s);
        }
        break;

    case DMAR_FRCD_REG_0_3:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* May clear bit 127 (Fault), update PPF */
        vtd_update_fsts_ppf(s);
        break;

    case DMAR_IRTA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IRTA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    default:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
    }
}
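/*
 * For illustration (hypothetical guest accesses): 64-bit registers can
 * be written as one 8-byte store or as two 4-byte stores, the low word
 * at the register offset and the high word at the matching _HI offset.
 * Updating the invalidation queue tail with 4-byte stores would arrive
 * here as:
 *
 *     vtd_mem_write(s, DMAR_IQT_REG, 0x10, 4);     low word, kicks
 *                                                  vtd_handle_iqt_write()
 *     vtd_mem_write(s, DMAR_IQT_REG_HI, 0x0, 4);   bits 32..63 (19:63 of
 *                                                  IQT_REG are RsvdZ)
 */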
static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
                                         IOMMUAccessFlags flag)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IOMMUTLBEntry iotlb = {
        /* We'll fill in the rest later. */
        .target_as = &address_space_memory,
    };
    bool success;

    if (likely(s->dmar_enabled)) {
        success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
                                         addr, flag & IOMMU_WO, &iotlb);
    } else {
        /* DMAR disabled, passthrough, use 4k-page */
        iotlb.iova = addr & VTD_PAGE_MASK_4K;
        iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
        iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
        iotlb.perm = IOMMU_RW;
        success = true;
    }

    if (likely(success)) {
        trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus),
                                 VTD_PCI_SLOT(vtd_as->devfn),
                                 VTD_PCI_FUNC(vtd_as->devfn),
                                 iotlb.iova, iotlb.translated_addr,
                                 iotlb.addr_mask);
    } else {
        trace_vtd_err_dmar_translate(pci_bus_num(vtd_as->bus),
                                     VTD_PCI_SLOT(vtd_as->devfn),
                                     VTD_PCI_FUNC(vtd_as->devfn),
                                     addr);
    }

    return iotlb;
}
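/*
 * Worked example (illustrative numbers): with DMAR disabled, a DMA to
 * address 0x12345678 takes the passthrough branch above and yields
 * iova == translated_addr == 0x12345000, addr_mask == 0xfff and
 * perm == IOMMU_RW, i.e. an identity mapping of the containing 4K page.
 */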
static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IntelIOMMUNotifierNode *node = NULL;
    IntelIOMMUNotifierNode *next_node = NULL;

    if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
        error_report("We need to set cache_mode=1 for intel-iommu to enable "
                     "device assignment with IOMMU protection.");
        exit(1);
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        node = g_malloc0(sizeof(*node));
        node->vtd_as = vtd_as;
        QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
        return;
    }

    /* update notifier node with new flags */
    QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
        if (node->vtd_as == vtd_as) {
            if (new == IOMMU_NOTIFIER_NONE) {
                QLIST_REMOVE(node, next);
                g_free(node);
            }
            return;
        }
    }
}
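/*
 * Note: MAP notifiers (needed e.g. for vfio-pci device assignment)
 * only work when the emulated IOMMU advertises Caching Mode, because
 * CM=1 is what forces the guest to send invalidations for new mappings
 * that we can then trap. An illustrative command line would be:
 *
 *     -device intel-iommu,intremap=on,caching-mode=on
 */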
static int vtd_post_load(void *opaque, int version_id)
{
    IntelIOMMUState *iommu = opaque;

    /*
     * Memory regions are dynamically turned on/off depending on
     * context entry configurations from the guest. After migration,
     * we need to make sure the memory regions are still correct.
     */
    vtd_switch_address_space_all(iommu);

    return 0;
}
static const VMStateDescription vtd_vmstate = {
    .name = "iommu-intel",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .post_load = vtd_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(root, IntelIOMMUState),
        VMSTATE_UINT64(intr_root, IntelIOMMUState),
        VMSTATE_UINT64(iq, IntelIOMMUState),
        VMSTATE_UINT32(intr_size, IntelIOMMUState),
        VMSTATE_UINT16(iq_head, IntelIOMMUState),
        VMSTATE_UINT16(iq_tail, IntelIOMMUState),
        VMSTATE_UINT16(iq_size, IntelIOMMUState),
        VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
        VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
        VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
        VMSTATE_BOOL(root_extended, IntelIOMMUState),
        VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
        VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_eime, IntelIOMMUState),
        VMSTATE_END_OF_LIST()
    }
};
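/*
 * Note: only guest-visible state is migrated. The csr array travels as
 * raw bytes, while wmask/w1cmask/womask and cap/ecap are rebuilt
 * deterministically by vtd_init() on the destination; vtd_post_load()
 * then re-derives the per-device memory region enablement from the
 * migrated register state.
 */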
static const MemoryRegionOps vtd_mem_ops = {
    .read = vtd_mem_read,
    .write = vtd_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
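/*
 * Note (assumption about memory core behaviour): with both .impl and
 * .valid constrained to 4..8 bytes, sub-4-byte guest accesses to the
 * DMAR registers are rejected by the memory core rather than being
 * forwarded to vtd_mem_read()/vtd_mem_write(), and 4-byte accesses are
 * never split or widened.
 */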
static Property vtd_properties[] = {
    DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
    DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
    DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, false),
    DEFINE_PROP_END_OF_LIST(),
};
/* Read IRTE entry with specific index */
static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
                        VTD_IR_TableEntry *entry, uint16_t sid)
{
    static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = {
        0xffff, 0xfffb, 0xfff9, 0xfff8
    };
    dma_addr_t addr = 0x00;
    uint16_t mask, source_id;
    uint8_t bus, bus_max, bus_min;

    addr = iommu->intr_root + index * sizeof(*entry);
    if (dma_memory_read(&address_space_memory, addr, entry,
                        sizeof(*entry))) {
        trace_vtd_err("Memory read failed for IRTE.");
        return -VTD_FR_IR_ROOT_INVAL;
    }

    trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]),
                          le64_to_cpu(entry->data[0]));

    if (!entry->irte.present) {
        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
                           le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_ENTRY_P;
    }

    if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
        entry->irte.__reserved_2) {
        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
                           le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_IRTE_RSVD;
    }

    if (sid != X86_IOMMU_SID_INVALID) {
        /* Validate IRTE SID */
        source_id = le32_to_cpu(entry->irte.source_id);
        switch (entry->irte.sid_vtype) {
        case VTD_SVT_NONE:
            break;

        case VTD_SVT_ALL:
            mask = vtd_svt_mask[entry->irte.sid_q];
            if ((source_id & mask) != (sid & mask)) {
                trace_vtd_err_irte_sid(index, sid, source_id);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        case VTD_SVT_BUS:
            bus_max = source_id >> 8;
            bus_min = source_id & 0xff;
            bus = sid >> 8;
            if (bus > bus_max || bus < bus_min) {
                trace_vtd_err_irte_sid_bus(index, bus, bus_min, bus_max);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        default:
            trace_vtd_err_irte_svt(index, entry->irte.sid_vtype);
            /* Take this as verification failure. */
            return -VTD_FR_IR_SID_ERR;
        }
    }

    return 0;
}
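/*
 * Worked example for the VTD_SVT_ALL check (hypothetical IDs): with
 * sid_q == 2 the mask is 0xfff9, which ignores function bits 1..2 of
 * the requester ID. A device at 02:03.1 (sid 0x0219) therefore matches
 * a source_id of 0x021f, while any difference in the bus or slot bits
 * fails with -VTD_FR_IR_SID_ERR.
 */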
/* Fetch IRQ information of specific IR index */
static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
                             VTDIrq *irq, uint16_t sid)
{
    VTD_IR_TableEntry irte = {};
    int ret = 0;

    ret = vtd_irte_get(iommu, index, &irte, sid);
    if (ret) {
        return ret;
    }

    irq->trigger_mode = irte.irte.trigger_mode;
    irq->vector = irte.irte.vector;
    irq->delivery_mode = irte.irte.delivery_mode;
    irq->dest = le32_to_cpu(irte.irte.dest_id);
    if (!iommu->intr_eime) {
#define  VTD_IR_APIC_DEST_MASK         (0xff00ULL)
#define  VTD_IR_APIC_DEST_SHIFT        (8)
        irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
            VTD_IR_APIC_DEST_SHIFT;
    }
    irq->dest_mode = irte.irte.dest_mode;
    irq->redir_hint = irte.irte.redir_hint;

    trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
                       irq->delivery_mode, irq->dest, irq->dest_mode);

    return 0;
}
/* Generate one MSI message from VTDIrq info */
static void vtd_generate_msi_message(VTDIrq *irq, MSIMessage *msg_out)
{
    VTD_MSIMessage msg = {};

    /* Generate address bits */
    msg.dest_mode = irq->dest_mode;
    msg.redir_hint = irq->redir_hint;
    msg.dest = irq->dest;
    msg.__addr_hi = irq->dest & 0xffffff00;
    msg.__addr_head = cpu_to_le32(0xfee);
    /* Keep this from original MSI address bits */
    msg.__not_used = irq->msi_addr_last_bits;

    /* Generate data bits */
    msg.vector = irq->vector;
    msg.delivery_mode = irq->delivery_mode;
    msg.level = 1;
    msg.trigger_mode = irq->trigger_mode;

    msg_out->address = msg.msi_addr;
    msg_out->data = msg.msi_data;
}
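/*
 * Worked example (hypothetical IRQ, xAPIC format): for dest == 0x05,
 * vector == 0x31, fixed delivery and edge trigger, the bitfields above
 * assemble to approximately address 0xfee05000 and data 0x0031, i.e.
 * the same encoding the guest would have programmed directly with
 * interrupt remapping disabled.
 */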
/* Interrupt remapping for MSI/MSI-X entry */
static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
                                   MSIMessage *origin,
                                   MSIMessage *translated,
                                   uint16_t sid)
{
    int ret = 0;
    VTD_IR_MSIAddress addr;
    uint16_t index;
    VTDIrq irq = {};

    assert(origin && translated);

    trace_vtd_ir_remap_msi_req(origin->address, origin->data);

    if (!iommu || !iommu->intr_enabled) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    if (origin->address & VTD_MSI_ADDR_HI_MASK) {
        trace_vtd_err("MSI address high 32 bits non-zero when "
                      "Interrupt Remapping enabled.");
        return -VTD_FR_IR_REQ_RSVD;
    }

    addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
    if (addr.addr.__head != 0xfee) {
        trace_vtd_err("MSI addr low 32 bits invalid.");
        return -VTD_FR_IR_REQ_RSVD;
    }

    /* This is compatible mode. */
    if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);

#define  VTD_IR_MSI_DATA_SUBHANDLE       (0x0000ffff)
#define  VTD_IR_MSI_DATA_RESERVED        (0xffff0000)

    if (addr.addr.sub_valid) {
        /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
        index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
    }

    ret = vtd_remap_irq_get(iommu, index, &irq, sid);
    if (ret) {
        return ret;
    }

    if (addr.addr.sub_valid) {
        trace_vtd_ir_remap_type("MSI");
        if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
            trace_vtd_err_ir_msi_invalid(sid, origin->address, origin->data);
            return -VTD_FR_IR_REQ_RSVD;
        }
    } else {
        uint8_t vector = origin->data & 0xff;
        uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

        trace_vtd_ir_remap_type("IOAPIC");
        /* IOAPIC entry vector should be aligned with IRTE vector
         * (see vt-d spec 5.1.5.1). */
        if (vector != irq.vector) {
            trace_vtd_warn_ir_vector(sid, index, vector, irq.vector);
        }

        /* The Trigger Mode field must match the Trigger Mode in the IRTE.
         * (see vt-d spec 5.1.5.1). */
        if (trigger_mode != irq.trigger_mode) {
            trace_vtd_warn_ir_trigger(sid, index, trigger_mode,
                                      irq.trigger_mode);
        }
    }

    /*
     * We'd better keep the last two bits, assuming that the guest OS
     * might modify them. Keeping them does not hurt after all.
     */
    irq.msi_addr_last_bits = addr.addr.__not_care;

    /* Translate VTDIrq to MSI message */
    vtd_generate_msi_message(&irq, translated);

out:
    trace_vtd_ir_remap_msi(origin->address, origin->data,
                           translated->address, translated->data);
    return 0;
}
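/*
 * For illustration (hypothetical handle values): in the remappable
 * format the 16-bit handle is split across the address bits, so IRTE
 * index 0x1234 arrives with index_l == 0x1234 and index_h == 0; only
 * handles of 0x8000 and above set index_h. When sub_valid is set, the
 * low 16 bits of the MSI data are added as the subhandle, e.g. data
 * 0x0002 selects index 0x1236.
 */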
static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
                         MSIMessage *dst, uint16_t sid)
{
    return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
                                   src, dst, sid);
}
static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
                                   uint64_t *data, unsigned size,
                                   MemTxAttrs attrs)
{
    return MEMTX_OK;
}

static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    int ret = 0;
    MSIMessage from = {}, to = {};
    uint16_t sid = X86_IOMMU_SID_INVALID;

    from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
    from.data = (uint32_t) value;

    if (!attrs.unspecified) {
        /* We have explicit Source ID */
        sid = attrs.requester_id;
    }

    ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
    if (ret) {
        /* TODO: report error */
        /* Drop this interrupt */
        return MEMTX_ERROR;
    }

    apic_get_class()->send_msi(&to);

    return MEMTX_OK;
}
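/*
 * Note: this handler backs the 0xfeexxxxx interrupt window, so a DMA
 * write of the MSI data word to the programmed MSI address lands here.
 * The bus-provided MemTxAttrs.requester_id is what enables the
 * per-device source-id validation in vtd_interrupt_remap_msi().
 */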
static const MemoryRegionOps vtd_mem_ir_ops = {
    .read_with_attrs = vtd_mem_ir_read,
    .write_with_attrs = vtd_mem_ir_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
{
    uintptr_t key = (uintptr_t)bus;
    VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
    VTDAddressSpace *vtd_dev_as;
    char name[128];

    if (!vtd_bus) {
        uintptr_t *new_key = g_malloc(sizeof(*new_key));
        *new_key = (uintptr_t)bus;
        /* No corresponding free() */
        vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) *
                            X86_IOMMU_PCI_DEVFN_MAX);
        vtd_bus->bus = bus;
        g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];

    if (!vtd_dev_as) {
        snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
        vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));

        vtd_dev_as->bus = bus;
        vtd_dev_as->devfn = (uint8_t)devfn;
        vtd_dev_as->iommu_state = s;
        vtd_dev_as->context_cache_entry.context_cache_gen = 0;

        /*
         * Memory region relationships look like this (address ranges
         * show only the lower 32 bits to keep the table short):
         *
         * |-----------------+-------------------+----------|
         * | Name            | Address range     | Priority |
         * |-----------------+-------------------+----------|
         * | vtd_root        | 00000000-ffffffff |        0 |
         * | intel_iommu     | 00000000-ffffffff |        1 |
         * | vtd_sys_alias   | 00000000-ffffffff |        1 |
         * | intel_iommu_ir  | fee00000-feefffff |       64 |
         * |-----------------+-------------------+----------|
         *
         * We enable/disable DMAR by switching enablement for the
         * vtd_sys_alias and intel_iommu regions. The IR region is
         * always enabled.
         */
        memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s),
                                 &s->iommu_ops, "intel_iommu_dmar",
                                 UINT64_MAX);
        memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
                                 "vtd_sys_alias", get_system_memory(),
                                 0, memory_region_size(get_system_memory()));
        memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
                              &vtd_mem_ir_ops, s, "intel_iommu_ir",
                              VTD_INTERRUPT_ADDR_SIZE);
        memory_region_init(&vtd_dev_as->root, OBJECT(s),
                           "vtd_root", UINT64_MAX);
        memory_region_add_subregion_overlap(&vtd_dev_as->root,
                                            VTD_INTERRUPT_ADDR_FIRST,
                                            &vtd_dev_as->iommu_ir, 64);
        address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->sys_alias, 1);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->iommu, 1);
        vtd_switch_address_space(vtd_dev_as);
    }
    return vtd_dev_as;
}
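/*
 * Behavioural sketch of the region layout above (illustrative
 * addresses): with DMAR off, vtd_switch_address_space() enables
 * vtd_sys_alias, so a DMA to e.g. 0x1000 goes straight to system
 * memory; with DMAR on, the intel_iommu region takes over and the
 * access is translated instead. Writes in 0xfee00000..0xfeefffff always
 * hit intel_iommu_ir first because of its priority of 64.
 */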
/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
    IOMMUTLBEntry entry;
    hwaddr size;
    hwaddr start = n->start;
    hwaddr end = n->end;

    /*
     * Note: all the code in this function assumes that IOVAs use no
     * more than VTD_MGAW bits (which is what the VT-d spec mandates);
     * otherwise we would need to consider 64-bit overflow.
     */

    if (end > VTD_ADDRESS_SIZE) {
        /*
         * No need to unmap regions bigger than the whole address
         * space that VT-d supports.
         */
        end = VTD_ADDRESS_SIZE;
    }

    assert(start <= end);
    size = end - start;

    if (ctpop64(size) != 1) {
        /*
         * This size cannot form a correct mask. Let's enlarge it to
         * the smallest power of two that covers it.
         */
        int n = 64 - clz64(size);
        if (n > VTD_MGAW) {
            /* Should not happen, but in case it does, limit it */
            n = VTD_MGAW;
        }
        size = 1ULL << n;
    }

    entry.target_as = &address_space_memory;
    /* Adjust iova for the size */
    entry.iova = n->start & ~(size - 1);
    /* This field is meaningless for unmap */
    entry.translated_addr = 0;
    entry.perm = IOMMU_NONE;
    entry.addr_mask = size - 1;

    trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                             VTD_PCI_SLOT(as->devfn),
                             VTD_PCI_FUNC(as->devfn),
                             entry.iova, size);

    memory_region_notify_one(n, &entry);
}
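/*
 * Worked example of the rounding above (hypothetical scope): for a
 * notifier range [0, 0x3000), size == 0x3000 is not a power of two, so
 * n == 64 - clz64(0x3000) == 14 and size becomes 0x4000. The UNMAP
 * notification then covers iova 0 with addr_mask 0x3fff, deliberately
 * over-invalidating rather than under-invalidating.
 */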
static void vtd_address_space_unmap_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;
    VTDAddressSpace *vtd_as;
    IOMMUNotifier *n;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        vtd_as = node->vtd_as;
        IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
            vtd_address_space_unmap(vtd_as, n);
        }
    }
}
static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
{
    memory_region_notify_one((IOMMUNotifier *)private, entry);
    return 0;
}
static void vtd_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
{
    VTDAddressSpace *vtd_as = container_of(mr, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    uint8_t bus_n = pci_bus_num(vtd_as->bus);
    VTDContextEntry ce;

    /*
     * The replay can be triggered by either an invalidation or a newly
     * created entry. No matter what, we release existing mappings first
     * (which means flushing caches for UNMAP-only notifiers).
     */
    vtd_address_space_unmap(vtd_as, n);

    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
        trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
                                  PCI_FUNC(vtd_as->devfn),
                                  VTD_CONTEXT_ENTRY_DID(ce.hi),
                                  ce.hi, ce.lo);
        vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false);
    } else {
        trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
                                    PCI_FUNC(vtd_as->devfn));
    }
}
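/*
 * Note: replay is typically reached via memory_region_iommu_replay()
 * when a new notifier is registered, for instance when a vfio-pci
 * device appears behind the IOMMU; the unmap-then-walk order above
 * rebuilds that notifier's view from a clean slate.
 */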
/*
 * Do the initialization. It will also be called on reset, so pay
 * attention when adding new initialization stuff.
 */
static void vtd_init(IntelIOMMUState *s)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    memset(s->csr, 0, DMAR_REG_SIZE);
    memset(s->wmask, 0, DMAR_REG_SIZE);
    memset(s->w1cmask, 0, DMAR_REG_SIZE);
    memset(s->womask, 0, DMAR_REG_SIZE);

    s->iommu_ops.translate = vtd_iommu_translate;
    s->iommu_ops.notify_flag_changed = vtd_iommu_notify_flag_changed;
    s->iommu_ops.replay = vtd_iommu_replay;
    s->root = 0;
    s->root_extended = false;
    s->dmar_enabled = false;
    s->iq_head = 0;
    s->iq_tail = 0;
    s->iq = 0;
    s->iq_size = 0;
    s->qi_enabled = false;
    s->iq_last_desc_type = VTD_INV_DESC_NONE;
    s->next_frcd_reg = 0;
    s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
             VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS;
    s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;

    if (x86_iommu->intr_supported) {
        s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
        if (s->intr_eim == ON_OFF_AUTO_ON) {
            s->ecap |= VTD_ECAP_EIM;
        }
        assert(s->intr_eim != ON_OFF_AUTO_AUTO);
    }

    if (x86_iommu->dt_supported) {
        s->ecap |= VTD_ECAP_DT;
    }

    if (x86_iommu->pt_supported) {
        s->ecap |= VTD_ECAP_PT;
    }

    if (s->caching_mode) {
        s->cap |= VTD_CAP_CM;
    }

    vtd_reset_context_cache(s);
    vtd_reset_iotlb(s);

    /* Define registers with default values and bit semantics */
    vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
    vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
    vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
    vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
    vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
    vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
    vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
    vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);

    /* Advanced Fault Logging not supported */
    vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
    vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
    vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);

    /*
     * Treated as RsvdZ when EIM in ECAP_REG is not supported
     * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
     */
    vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);

    /*
     * Treated as RO for implementations that report the PLMR and PHMR
     * fields as Clear in CAP_REG.
     * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
     */
    vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);

    vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
    vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
    vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
    vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
    vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
    vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);

    /* IOTLB registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
    vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
    vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);

    /* Fault Recording Registers, 128-bit */
    vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
    vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);

    /*
     * Interrupt remapping registers.
     */
    vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
}
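/*
 * For illustration: DMAR_GCMD_REG above has wmask == womask ==
 * 0xff800000, so the command bits are writable but always read back as
 * zero; guests observe command completion through the corresponding
 * status bits in DMAR_GSTS_REG, matching the spec's write-only
 * treatment of GCMD.
 */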
/*
 * Should not reset address_spaces on reset because devices will still
 * use the address space they got at first (won't ask the bus again).
 */
static void vtd_reset(DeviceState *dev)
{
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);

    vtd_init(s);

    /*
     * On device reset, throw away all mappings and external caches.
     */
    vtd_address_space_unmap_all(s);
}
static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    VTDAddressSpace *vtd_as;

    assert(0 <= devfn && devfn < X86_IOMMU_PCI_DEVFN_MAX);

    vtd_as = vtd_find_add_as(s, bus, devfn);
    return &vtd_as->as;
}
static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    /* Currently Intel IOMMU IR only supports "kernel-irqchip={off|split}" */
    if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() &&
        !kvm_irqchip_is_split()) {
        error_setg(errp, "Intel Interrupt Remapping cannot work with "
                         "kernel-irqchip=on, please use 'split|off'.");
        return false;
    }

    if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu->intr_supported) {
        error_setg(errp, "eim=on cannot be selected without intremap=on");
        return false;
    }

    if (s->intr_eim == ON_OFF_AUTO_AUTO) {
        s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim) &&
                      x86_iommu->intr_supported ?
                      ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
        if (!kvm_irqchip_in_kernel()) {
            error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
            return false;
        }
        if (!kvm_enable_x2apic()) {
            error_setg(errp, "eim=on requires support on the KVM side "
                             "(X2APIC_API, first shipped in v4.7)");
            return false;
        }
    }

    return true;
}
static void vtd_realize(DeviceState *dev, Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    PCMachineState *pcms =
        PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE));
    PCIBus *bus;
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);

    if (!pcms) {
        error_setg(errp, "Machine-type '%s' not supported by intel-iommu",
                   mc->name);
        return;
    }

    bus = pcms->bus;
    x86_iommu->type = TYPE_INTEL;

    if (!vtd_decide_config(s, errp)) {
        return;
    }

    QLIST_INIT(&s->notifiers_list);
    memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
    memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                          "intel_iommu", DMAR_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
    /* No corresponding destroy */
    s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                     g_free, g_free);
    s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash,
                                                vtd_uint64_equal,
                                                g_free, g_free);
    vtd_init(s);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
    pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
    /* Pseudo address space under root PCI bus. */
    pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
}
static void vtd_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);

    dc->reset = vtd_reset;
    dc->vmsd = &vtd_vmstate;
    dc->props = vtd_properties;
    dc->hotpluggable = false;
    x86_class->realize = vtd_realize;
    x86_class->int_remap = vtd_int_remap;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
}
static const TypeInfo vtd_info = {
    .name          = TYPE_INTEL_IOMMU_DEVICE,
    .parent        = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(IntelIOMMUState),
    .class_init    = vtd_class_init,
};

static void vtd_register_types(void)
{
    type_register_static(&vtd_info);
}

type_init(vtd_register_types)