/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 *   (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/i386/apic_internal.h"
#include "kvm/kvm_i386.h"
#include "migration/vmstate.h"
#include "trace.h"
/* context entry operations */
#define VTD_CE_GET_RID2PASID(ce) \
    ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK)
#define VTD_CE_GET_PASID_DIR_TABLE(ce) \
    ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK)

/* pe operations */
#define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT)
#define VTD_PE_GET_LEVEL(pe) (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW))
#define VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write) {\
    if (ret_fr) {                                                             \
        ret_fr = -ret_fr;                                                     \
        if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {                   \
            trace_vtd_fault_disabled();                                       \
        } else {                                                              \
            vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);      \
        }                                                                     \
        goto error;                                                           \
    }                                                                         \
}
static void vtd_address_space_refresh_all(IntelIOMMUState *s);
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n);

static void vtd_panic_require_caching_mode(void)
{
    error_report("We need to set caching-mode=on for intel-iommu to enable "
                 "device assignment with IOMMU protection.");
    exit(1);
}
static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
                            uint64_t wmask, uint64_t w1cmask)
{
    stq_le_p(&s->csr[addr], val);
    stq_le_p(&s->wmask[addr], wmask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
{
    stq_le_p(&s->womask[addr], mask);
}

static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
                            uint32_t wmask, uint32_t w1cmask)
{
    stl_le_p(&s->csr[addr], val);
    stl_le_p(&s->wmask[addr], wmask);
    stl_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
{
    stl_le_p(&s->womask[addr], mask);
}
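
/*
 * Usage sketch for the helpers above. The values here are illustrative
 * only; the real reset values and masks for each register are programmed
 * in vtd_init(). A 64-bit register whose low 12 bits are reserved could
 * be declared as:
 *
 *     vtd_define_quad(s, addr, 0, ~0xfffULL, 0);
 *
 * making the low 12 bits read-only-zero to the guest while every other
 * bit is software-writable, with no write-1-to-clear bits.
 */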
/* "External" get/set operations */
static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    uint64_t oldval = ldq_le_p(&s->csr[addr]);
    uint64_t wmask = ldq_le_p(&s->wmask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    stq_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}

static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
{
    uint32_t oldval = ldl_le_p(&s->csr[addr]);
    uint32_t wmask = ldl_le_p(&s->wmask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    stl_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}

static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
{
    uint64_t val = ldq_le_p(&s->csr[addr]);
    uint64_t womask = ldq_le_p(&s->womask[addr]);
    return val & ~womask;
}

static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
{
    uint32_t val = ldl_le_p(&s->csr[addr]);
    uint32_t womask = ldl_le_p(&s->womask[addr]);
    return val & ~womask;
}
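
/*
 * Worked example for the write path above, with hypothetical masks: take
 * wmask = 0x00f0 (software-writable bits) and w1cmask = 0x000f
 * (write-1-to-clear bits). A guest write of val = 0x00ff to a register
 * holding oldval = 0x000f evaluates as:
 *
 *     (oldval & ~wmask) | (val & wmask)  = 0x000f | 0x00f0 = 0x00ff
 *     ...               & ~(w1cmask & val) = 0x00ff & ~0x000f = 0x00f0
 *
 * i.e. the RW bits take the written value while the W1C bits, written as
 * 1, are cleared - the usual status-register semantics.
 */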
/* "Internal" get/set operations */
static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldq_le_p(&s->csr[addr]);
}

static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldl_le_p(&s->csr[addr]);
}

static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->csr[addr], val);
}

static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
                                        uint32_t clear, uint32_t mask)
{
    uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
    stl_le_p(&s->csr[addr], new_val);
    return new_val;
}

static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
                                        uint64_t clear, uint64_t mask)
{
    uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
    stq_le_p(&s->csr[addr], new_val);
    return new_val;
}

static inline void vtd_iommu_lock(IntelIOMMUState *s)
{
    qemu_mutex_lock(&s->iommu_lock);
}

static inline void vtd_iommu_unlock(IntelIOMMUState *s)
{
    qemu_mutex_unlock(&s->iommu_lock);
}
static void vtd_update_scalable_state(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_RTADDR_REG);

    if (s->scalable_mode) {
        s->root_scalable = val & VTD_RTADDR_SMT;
    }
}

/* Whether the address space needs to notify new mappings */
static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as)
{
    return as->notifier_flags & IOMMU_NOTIFIER_MAP;
}
/* GHashTable functions */
static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint vtd_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
                                          gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    uint16_t domain_id = *(uint16_t *)user_data;
    return entry->domain_id == domain_id;
}
/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
    assert(level != 0);
    return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}

static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
{
    return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
}
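
/*
 * For reference: with VTD_PAGE_SHIFT_4K == 12 and VTD_SL_LEVEL_BITS == 9,
 * the shift above evaluates to 12/21/30/39 for levels 1-4, so the page
 * masks cover 4K, 2M, 1G and 512G regions respectively, matching the
 * second-level paging structure of the VT-d spec.
 */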
static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
                                        gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
    uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
    uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
    return (entry->domain_id == info->domain_id) &&
            (((entry->gfn & info->mask) == gfn) ||
             (entry->gfn == gfn_tlb));
}
/* Reset all the gen of VTDAddressSpace to zero and set the gen of
 * IntelIOMMUState to 1. Must be called with IOMMU lock held.
 */
static void vtd_reset_context_cache_locked(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    VTDBus *vtd_bus;
    GHashTableIter bus_it;
    uint32_t devfn_it;

    trace_vtd_context_cache_reset();

    g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);

    while (g_hash_table_iter_next(&bus_it, NULL, (void **)&vtd_bus)) {
        for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (!vtd_as) {
                continue;
            }
            vtd_as->context_cache_entry.context_cache_gen = 0;
        }
    }
    s->context_cache_gen = 1;
}
/* Must be called with IOMMU lock held. */
static void vtd_reset_iotlb_locked(IntelIOMMUState *s)
{
    g_hash_table_remove_all(s->iotlb);
}

static void vtd_reset_iotlb(IntelIOMMUState *s)
{
    vtd_iommu_lock(s);
    vtd_reset_iotlb_locked(s);
    vtd_iommu_unlock(s);
}

static void vtd_reset_caches(IntelIOMMUState *s)
{
    vtd_iommu_lock(s);
    vtd_reset_iotlb_locked(s);
    vtd_reset_context_cache_locked(s);
    vtd_iommu_unlock(s);
}
static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
                                  uint32_t level)
{
    return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
           ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
}

static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
{
    return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
}
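
/*
 * The composed key packs the page frame number, the source-id and the
 * paging level into a single 64-bit hash key. A sketch of the layout,
 * assuming the shift constants in intel_iommu_internal.h place the SID
 * and level fields above the widest possible gfn:
 *
 *     key = gfn
 *         | (uint64_t)source_id << VTD_IOTLB_SID_SHIFT
 *         | (uint64_t)level     << VTD_IOTLB_LVL_SHIFT;
 *
 * so the same gfn cached for two different requesters, or at two
 * different levels, never collides on one entry.
 */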
/* Must be called with IOMMU lock held */
static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                       hwaddr addr)
{
    VTDIOTLBEntry *entry;
    uint64_t key;
    int level;

    for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
        key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
                                source_id, level);
        entry = g_hash_table_lookup(s->iotlb, &key);
        if (entry) {
            goto out;
        }
    }

out:
    return entry;
}

/* Must be with IOMMU lock held */
static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                             uint16_t domain_id, hwaddr addr, uint64_t slpte,
                             uint8_t access_flags, uint32_t level)
{
    VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
    uint64_t *key = g_malloc(sizeof(*key));
    uint64_t gfn = vtd_get_iotlb_gfn(addr, level);

    trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
    if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
        trace_vtd_iotlb_reset("iotlb exceeds size limit");
        vtd_reset_iotlb_locked(s);
    }

    entry->gfn = gfn;
    entry->domain_id = domain_id;
    entry->slpte = slpte;
    entry->access_flags = access_flags;
    entry->mask = vtd_slpt_level_page_mask(level);
    *key = vtd_get_iotlb_key(gfn, source_id, level);
    g_hash_table_replace(s->iotlb, key, entry);
}
/* Given the reg addr of both the message data and address, generate an
 * interrupt via MSI.
 */
static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
                                   hwaddr mesg_data_reg)
{
    MSIMessage msi;

    assert(mesg_data_reg < DMAR_REG_SIZE);
    assert(mesg_addr_reg < DMAR_REG_SIZE);

    msi.address = vtd_get_long_raw(s, mesg_addr_reg);
    msi.data = vtd_get_long_raw(s, mesg_data_reg);

    trace_vtd_irq_generate(msi.address, msi.data);

    apic_get_class()->send_msi(&msi);
}
/* Generate a fault event to software via MSI if conditions are met.
 * Notice that the value of FSTS_REG being passed to it should be the one
 * before any update.
 */
static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
{
    if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
        pre_fsts & VTD_FSTS_IQE) {
        error_report_once("There are previous interrupt conditions "
                          "to be serviced by software, fault event "
                          "is not generated");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
    if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
        error_report_once("Interrupt Mask set, irq is not generated");
    } else {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}
/* Check if the Fault (F) field of the Fault Recording Register referenced by
 * @index is Set.
 */
static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
}
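
/*
 * Address arithmetic used here and below, spelled out: each fault
 * recording register is 128 bits wide, hence "index << 4" (16 bytes per
 * register) and "+ 8" to reach the upper 64-bit half, which carries the
 * F bit along with the SID and fault reason fields.
 */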
/* Update the PPF field of Fault Status Register.
 * Should be called whenever the F field of any fault recording
 * register changes.
 */
static void vtd_update_fsts_ppf(IntelIOMMUState *s)
{
    uint32_t i;
    uint32_t ppf_mask = 0;

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        if (vtd_is_frcd_set(s, i)) {
            ppf_mask = VTD_FSTS_PPF;
            break;
        }
    }
    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
    trace_vtd_fsts_ppf(!!ppf_mask);
}

static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
    vtd_update_fsts_ppf(s);
}
/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
                            uint16_t source_id, hwaddr addr,
                            VTDFaultReason fault, bool is_write)
{
    uint64_t hi = 0, lo;
    hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);

    assert(index < DMAR_FRCD_REG_NR);

    lo = VTD_FRCD_FI(addr);
    hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
    if (!is_write) {
        hi |= VTD_FRCD_T;
    }
    vtd_set_quad_raw(s, frcd_reg_addr, lo);
    vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);

    trace_vtd_frr_new(index, hi, lo);
}

/* Try to collapse multiple pending faults from the same requester */
static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
{
    uint32_t i;
    uint64_t frcd_reg;
    hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        frcd_reg = vtd_get_quad_raw(s, addr);
        if ((frcd_reg & VTD_FRCD_F) &&
            ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
            return true;
        }
        addr += 16; /* 128-bit for each */
    }

    return false;
}
/* Log and report a DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
                                  hwaddr addr, VTDFaultReason fault,
                                  bool is_write)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    assert(fault < VTD_FR_MAX);

    if (fault == VTD_FR_RESERVED_ERR) {
        /* This is not a normal fault reason case. Drop it. */
        return;
    }

    trace_vtd_dmar_fault(source_id, fault, addr, is_write);

    if (fsts_reg & VTD_FSTS_PFO) {
        error_report_once("New fault is not recorded due to "
                          "Primary Fault Overflow");
        return;
    }

    if (vtd_try_collapse_fault(s, source_id)) {
        error_report_once("New fault is not recorded due to "
                          "compression of faults");
        return;
    }

    if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
        error_report_once("Next Fault Recording Reg is used, "
                          "new fault is not recorded, set PFO field");
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
        return;
    }

    vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);

    if (fsts_reg & VTD_FSTS_PPF) {
        error_report_once("There are pending faults already, "
                          "fault event is not generated");
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
    } else {
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
                                VTD_FSTS_FRI(s->next_frcd_reg));
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
        /* This case actually causes the PPF to be Set,
         * so generate the fault event (interrupt).
         */
        vtd_generate_fault_event(s, fsts_reg);
    }
}
/* Handle Invalidation Queue Errors of queued invalidation interface error
 * conditions.
 */
static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
    vtd_generate_fault_event(s, fsts_reg);
}

/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
    if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
        trace_vtd_inv_desc_wait_irq("One pending, skip current");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
    vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
    if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
        trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
                                    "new event not generated");
        return;
    } else {
        /* Generate the interrupt event */
        trace_vtd_inv_desc_wait_irq("Generating complete event");
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}
static inline bool vtd_root_entry_present(IntelIOMMUState *s,
                                          VTDRootEntry *re,
                                          uint8_t devfn)
{
    if (s->root_scalable && devfn > UINT8_MAX / 2) {
        return re->hi & VTD_ROOT_ENTRY_P;
    }

    return re->lo & VTD_ROOT_ENTRY_P;
}

static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
                              VTDRootEntry *re)
{
    dma_addr_t addr;

    addr = s->root + index * sizeof(*re);
    if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
        re->lo = 0;
        return -VTD_FR_ROOT_TABLE_INV;
    }
    re->lo = le64_to_cpu(re->lo);
    re->hi = le64_to_cpu(re->hi);
    return 0;
}
static inline bool vtd_ce_present(VTDContextEntry *context)
{
    return context->lo & VTD_CONTEXT_ENTRY_P;
}

static int vtd_get_context_entry_from_root(IntelIOMMUState *s,
                                           VTDRootEntry *re,
                                           uint8_t index,
                                           VTDContextEntry *ce)
{
    dma_addr_t addr, ce_size;

    /* we have checked that root entry is present */
    ce_size = s->root_scalable ? VTD_CTX_ENTRY_SCALABLE_SIZE :
              VTD_CTX_ENTRY_LEGACY_SIZE;

    if (s->root_scalable && index > UINT8_MAX / 2) {
        index = index & (~VTD_DEVFN_CHECK_MASK);
        addr = re->hi & VTD_ROOT_ENTRY_CTP;
    } else {
        addr = re->lo & VTD_ROOT_ENTRY_CTP;
    }

    addr = addr + index * ce_size;
    if (dma_memory_read(&address_space_memory, addr, ce, ce_size)) {
        return -VTD_FR_CONTEXT_TABLE_INV;
    }

    ce->lo = le64_to_cpu(ce->lo);
    ce->hi = le64_to_cpu(ce->hi);
    if (ce_size == VTD_CTX_ENTRY_SCALABLE_SIZE) {
        ce->val[2] = le64_to_cpu(ce->val[2]);
        ce->val[3] = le64_to_cpu(ce->val[3]);
    }
    return 0;
}
static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}

static inline uint64_t vtd_get_slpte_addr(uint64_t slpte, uint8_t aw)
{
    return slpte & VTD_SL_PT_BASE_ADDR_MASK(aw);
}

/* Whether the pte indicates the address of the page frame */
static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
{
    return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
}

/* Get the content of a spte located in @base_addr[@index] */
static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
{
    uint64_t slpte;

    assert(index < VTD_SL_PT_ENTRY_NR);

    if (dma_memory_read(&address_space_memory,
                        base_addr + index * sizeof(slpte), &slpte,
                        sizeof(slpte))) {
        slpte = (uint64_t)-1;
        return slpte;
    }
    slpte = le64_to_cpu(slpte);
    return slpte;
}
/* Given an iova and the level of paging structure, return the offset
 * of current level.
 */
static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
    return (iova >> vtd_slpt_level_shift(level)) &
           ((1ULL << VTD_SL_LEVEL_BITS) - 1);
}

/* Check Capability Register to see if the @level of page-table is supported */
static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
{
    return VTD_CAP_SAGAW_MASK & s->cap &
           (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
}

/* Return true if check passed, otherwise false */
static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu,
                                     VTDPASIDEntry *pe)
{
    switch (VTD_PE_GET_TYPE(pe)) {
    case VTD_SM_PASID_ENTRY_FLT:
    case VTD_SM_PASID_ENTRY_SLT:
    case VTD_SM_PASID_ENTRY_NESTED:
        break;
    case VTD_SM_PASID_ENTRY_PT:
        if (!x86_iommu->pt_supported) {
            return false;
        }
        break;
    default:
        /* Unknown type */
        return false;
    }
    return true;
}
static inline bool vtd_pdire_present(VTDPASIDDirEntry *pdire)
{
    return pdire->val & 1;
}

/**
 * Caller of this function should check the present bit if it wants
 * to use the pdir entry for anything beyond the fpd bit check.
 */
static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base,
                                         uint32_t pasid,
                                         VTDPASIDDirEntry *pdire)
{
    uint32_t index;
    dma_addr_t addr, entry_size;

    index = VTD_PASID_DIR_INDEX(pasid);
    entry_size = VTD_PASID_DIR_ENTRY_SIZE;
    addr = pasid_dir_base + index * entry_size;
    if (dma_memory_read(&address_space_memory, addr, pdire, entry_size)) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    return 0;
}

static inline bool vtd_pe_present(VTDPASIDEntry *pe)
{
    return pe->val[0] & VTD_PASID_ENTRY_P;
}
static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s,
                                          uint32_t pasid,
                                          dma_addr_t addr,
                                          VTDPASIDEntry *pe)
{
    uint32_t index;
    dma_addr_t entry_size;
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    index = VTD_PASID_TABLE_INDEX(pasid);
    entry_size = VTD_PASID_ENTRY_SIZE;
    addr = addr + index * entry_size;
    if (dma_memory_read(&address_space_memory, addr, pe, entry_size)) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    /* Do translation type check */
    if (!vtd_pe_type_check(x86_iommu, pe)) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    if (!vtd_is_level_supported(s, VTD_PE_GET_LEVEL(pe))) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    return 0;
}

/**
 * Caller of this function should check the present bit if it wants
 * to use the pasid entry for anything beyond the fpd bit check.
 */
static int vtd_get_pe_from_pdire(IntelIOMMUState *s,
                                 uint32_t pasid,
                                 VTDPASIDDirEntry *pdire,
                                 VTDPASIDEntry *pe)
{
    dma_addr_t addr = pdire->val & VTD_PASID_TABLE_BASE_ADDR_MASK;

    return vtd_get_pe_in_pasid_leaf_table(s, pasid, addr, pe);
}
/**
 * This function gets a pasid entry from a specified pasid
 * table (includes dir and leaf table) with a specified pasid.
 * Sanity checks are done to ensure that a present pasid entry
 * is returned to the caller.
 */
static int vtd_get_pe_from_pasid_table(IntelIOMMUState *s,
                                       dma_addr_t pasid_dir_base,
                                       uint32_t pasid,
                                       VTDPASIDEntry *pe)
{
    int ret;
    VTDPASIDDirEntry pdire;

    ret = vtd_get_pdire_from_pdir_table(pasid_dir_base,
                                        pasid, &pdire);
    if (ret) {
        return ret;
    }

    if (!vtd_pdire_present(&pdire)) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    ret = vtd_get_pe_from_pdire(s, pasid, &pdire, pe);
    if (ret) {
        return ret;
    }

    if (!vtd_pe_present(pe)) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    return 0;
}

static int vtd_ce_get_rid2pasid_entry(IntelIOMMUState *s,
                                      VTDContextEntry *ce,
                                      VTDPASIDEntry *pe)
{
    uint32_t pasid;
    dma_addr_t pasid_dir_base;
    int ret = 0;

    pasid = VTD_CE_GET_RID2PASID(ce);
    pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce);
    ret = vtd_get_pe_from_pasid_table(s, pasid_dir_base, pasid, pe);

    return ret;
}
static int vtd_ce_get_pasid_fpd(IntelIOMMUState *s,
                                VTDContextEntry *ce,
                                bool *pe_fpd_set)
{
    int ret;
    uint32_t pasid;
    dma_addr_t pasid_dir_base;
    VTDPASIDDirEntry pdire;
    VTDPASIDEntry pe;

    pasid = VTD_CE_GET_RID2PASID(ce);
    pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce);

    /*
     * No present bit check since fpd is meaningful even
     * if the present bit is clear.
     */
    ret = vtd_get_pdire_from_pdir_table(pasid_dir_base, pasid, &pdire);
    if (ret) {
        return ret;
    }

    if (pdire.val & VTD_PASID_DIR_FPD) {
        *pe_fpd_set = true;
        return 0;
    }

    if (!vtd_pdire_present(&pdire)) {
        return -VTD_FR_PASID_TABLE_INV;
    }

    /*
     * No present bit check since fpd is meaningful even
     * if the present bit is clear.
     */
    ret = vtd_get_pe_from_pdire(s, pasid, &pdire, &pe);
    if (ret) {
        return ret;
    }

    if (pe.val[0] & VTD_PASID_ENTRY_FPD) {
        *pe_fpd_set = true;
    }

    return 0;
}
/* Get the page-table level that hardware should use for the second-level
 * page-table walk from the Address Width field of context-entry.
 */
static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce)
{
    return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
}

static uint32_t vtd_get_iova_level(IntelIOMMUState *s,
                                   VTDContextEntry *ce)
{
    VTDPASIDEntry pe;

    if (s->root_scalable) {
        vtd_ce_get_rid2pasid_entry(s, ce, &pe);
        return VTD_PE_GET_LEVEL(&pe);
    }

    return vtd_ce_get_level(ce);
}

static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce)
{
    return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
}

static uint32_t vtd_get_iova_agaw(IntelIOMMUState *s,
                                  VTDContextEntry *ce)
{
    VTDPASIDEntry pe;

    if (s->root_scalable) {
        vtd_ce_get_rid2pasid_entry(s, ce, &pe);
        return 30 + ((pe.val[0] >> 2) & VTD_SM_PASID_ENTRY_AW) * 9;
    }

    return vtd_ce_get_agaw(ce);
}
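
/*
 * The "30 + aw * 9" arithmetic above mirrors the SAGAW encoding: each
 * extra page-table level adds 9 bits of address width on top of the
 * 30-bit (2-level) minimum. So aw == 1 yields a 39-bit (3-level) and
 * aw == 2 a 48-bit (4-level) guest address width, consistent with
 * vtd_ce_get_level() returning "2 + aw".
 */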
static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_TT;
}

/* Only for Legacy Mode. Return true if check passed, otherwise false */
static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
                                     VTDContextEntry *ce)
{
    switch (vtd_ce_get_type(ce)) {
    case VTD_CONTEXT_TT_MULTI_LEVEL:
        /* Always supported */
        break;
    case VTD_CONTEXT_TT_DEV_IOTLB:
        if (!x86_iommu->dt_supported) {
            error_report_once("%s: DT specified but not supported", __func__);
            return false;
        }
        break;
    case VTD_CONTEXT_TT_PASS_THROUGH:
        if (!x86_iommu->pt_supported) {
            error_report_once("%s: PT specified but not supported", __func__);
            return false;
        }
        break;
    default:
        /* Unknown type */
        error_report_once("%s: unknown ce type: %"PRIu32, __func__,
                          vtd_ce_get_type(ce));
        return false;
    }
    return true;
}
static inline uint64_t vtd_iova_limit(IntelIOMMUState *s,
                                      VTDContextEntry *ce, uint8_t aw)
{
    uint32_t ce_agaw = vtd_get_iova_agaw(s, ce);
    return 1ULL << MIN(ce_agaw, aw);
}

/* Return true if IOVA passes range check, otherwise false. */
static inline bool vtd_iova_range_check(IntelIOMMUState *s,
                                        uint64_t iova, VTDContextEntry *ce,
                                        uint8_t aw)
{
    /*
     * Check if @iova is above 2^X-1, where X is the minimum of MGAW
     * in CAP_REG and AW in context-entry.
     */
    return !(iova & ~(vtd_iova_limit(s, ce, aw) - 1));
}

static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s,
                                          VTDContextEntry *ce)
{
    VTDPASIDEntry pe;

    if (s->root_scalable) {
        vtd_ce_get_rid2pasid_entry(s, ce, &pe);
        return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR;
    }

    return vtd_ce_get_slpt_base(ce);
}
/*
 * Rsvd field masks for spte:
 *     vtd_spte_rsvd        4k pages
 *     vtd_spte_rsvd_large  large pages
 */
static uint64_t vtd_spte_rsvd[5];
static uint64_t vtd_spte_rsvd_large[5];

static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
{
    uint64_t rsvd_mask = vtd_spte_rsvd[level];

    if ((level == VTD_SL_PD_LEVEL || level == VTD_SL_PDP_LEVEL) &&
        (slpte & VTD_SL_PT_PAGE_SIZE_MASK)) {
        /* large page */
        rsvd_mask = vtd_spte_rsvd_large[level];
    }

    return slpte & rsvd_mask;
}
/* Find the VTD address space associated with a given bus number */
static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
{
    VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
    GHashTableIter iter;

    if (vtd_bus) {
        return vtd_bus;
    }

    /*
     * Iterate over the registered buses to find the one which
     * currently holds this bus number and update the bus_num
     * lookup table.
     */
    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
        if (pci_bus_num(vtd_bus->bus) == bus_num) {
            s->vtd_as_by_bus_num[bus_num] = vtd_bus;
            return vtd_bus;
        }
    }

    return NULL;
}
/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
 * of the translation, can be used for deciding the size of large page.
 */
static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce,
                             uint64_t iova, bool is_write,
                             uint64_t *slptep, uint32_t *slpte_level,
                             bool *reads, bool *writes, uint8_t aw_bits)
{
    dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce);
    uint32_t level = vtd_get_iova_level(s, ce);
    uint32_t offset;
    uint64_t slpte;
    uint64_t access_right_check;

    if (!vtd_iova_range_check(s, iova, ce, aw_bits)) {
        error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")",
                          __func__, iova);
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    /* FIXME: what is the Atomics request here? */
    access_right_check = is_write ? VTD_SL_W : VTD_SL_R;

    while (true) {
        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            error_report_once("%s: detected read error on DMAR slpte "
                              "(iova=0x%" PRIx64 ")", __func__, iova);
            if (level == vtd_get_iova_level(s, ce)) {
                /* Invalid programming of context-entry */
                return -VTD_FR_CONTEXT_ENTRY_INV;
            } else {
                return -VTD_FR_PAGING_ENTRY_INV;
            }
        }
        *reads = (*reads) && (slpte & VTD_SL_R);
        *writes = (*writes) && (slpte & VTD_SL_W);
        if (!(slpte & access_right_check)) {
            error_report_once("%s: detected slpte permission error "
                              "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
                              "slpte=0x%" PRIx64 ", write=%d)", __func__,
                              iova, level, slpte, is_write);
            return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
        }
        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            error_report_once("%s: detected slpte reserved bits non-zero "
                              "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
                              "slpte=0x%" PRIx64 ")", __func__, iova,
                              level, slpte);
            return -VTD_FR_PAGING_ENTRY_RSVD;
        }

        if (vtd_is_last_slpte(slpte, level)) {
            *slptep = slpte;
            *slpte_level = level;
            return 0;
        }
        addr = vtd_get_slpte_addr(slpte, aw_bits);
        level--;
    }
}
)(IOMMUTLBEvent
*event
, void *private);
1080 * Constant information used during page walking
1082 * @hook_fn: hook func to be called when detected page
1083 * @private: private data to be passed into hook func
1084 * @notify_unmap: whether we should notify invalid entries
1085 * @as: VT-d address space of the device
1086 * @aw: maximum address width
1087 * @domain: domain ID of the page walk
1090 VTDAddressSpace
*as
;
1091 vtd_page_walk_hook hook_fn
;
1096 } vtd_page_walk_info
;
static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info)
{
    VTDAddressSpace *as = info->as;
    vtd_page_walk_hook hook_fn = info->hook_fn;
    void *private = info->private;
    IOMMUTLBEntry *entry = &event->entry;
    DMAMap target = {
        .iova = entry->iova,
        .size = entry->addr_mask,
        .translated_addr = entry->translated_addr,
        .perm = entry->perm,
    };
    DMAMap *mapped = iova_tree_find(as->iova_tree, &target);

    if (event->type == IOMMU_NOTIFIER_UNMAP && !info->notify_unmap) {
        trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask);
        return 0;
    }

    assert(hook_fn);

    /* Update local IOVA mapped ranges */
    if (event->type == IOMMU_NOTIFIER_MAP) {
        if (mapped) {
            /* If it's exactly the same translation, skip */
            if (!memcmp(mapped, &target, sizeof(target))) {
                trace_vtd_page_walk_one_skip_map(entry->iova, entry->addr_mask,
                                                 entry->translated_addr);
                return 0;
            } else {
                /*
                 * Translation changed. Normally this should not
                 * happen, but it can happen with buggy guest
                 * OSes. Note that there will be a small window where
                 * we don't have a map at all, but that's the best
                 * effort we can do. The ideal way to emulate this is
                 * to atomically modify the PTE to follow what has
                 * changed, but we can't. One example is that the vfio
                 * driver only has VFIO_IOMMU_[UN]MAP_DMA but no
                 * interface to modify a mapping (meanwhile it seems
                 * meaningless to even provide one). Anyway, let's
                 * mark this as a TODO in case one day we'll have
                 * a better solution.
                 */
                IOMMUAccessFlags cache_perm = entry->perm;
                int ret;

                /* Emulate an UNMAP */
                event->type = IOMMU_NOTIFIER_UNMAP;
                entry->perm = IOMMU_NONE;
                trace_vtd_page_walk_one(info->domain_id,
                                        entry->iova,
                                        entry->translated_addr,
                                        entry->addr_mask,
                                        entry->perm);
                ret = hook_fn(event, private);
                if (ret) {
                    return ret;
                }
                /* Drop any existing mapping */
                iova_tree_remove(as->iova_tree, &target);
                /* Recover the correct type */
                event->type = IOMMU_NOTIFIER_MAP;
                entry->perm = cache_perm;
            }
        }
        iova_tree_insert(as->iova_tree, &target);
    } else {
        if (!mapped) {
            /* Skip since we didn't map this range at all */
            trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask);
            return 0;
        }
        iova_tree_remove(as->iova_tree, &target);
    }

    trace_vtd_page_walk_one(info->domain_id, entry->iova,
                            entry->translated_addr, entry->addr_mask,
                            entry->perm);
    return hook_fn(event, private);
}
/**
 * vtd_page_walk_level - walk over specific level for IOVA range
 *
 * @addr: base GPA addr to start the walk
 * @start: IOVA range start address
 * @end: IOVA range end address (start <= addr < end)
 * @level: paging level to walk at
 * @read: whether parent level has read permission
 * @write: whether parent level has write permission
 * @info: constant information for the page walk
 */
static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
                               uint64_t end, uint32_t level, bool read,
                               bool write, vtd_page_walk_info *info)
{
    bool read_cur, write_cur, entry_valid;
    uint32_t offset;
    uint64_t slpte;
    uint64_t subpage_size, subpage_mask;
    IOMMUTLBEvent event;
    uint64_t iova = start;
    uint64_t iova_next;
    int ret = 0;

    trace_vtd_page_walk_level(addr, level, start, end);

    subpage_size = 1ULL << vtd_slpt_level_shift(level);
    subpage_mask = vtd_slpt_level_page_mask(level);

    while (iova < end) {
        iova_next = (iova & subpage_mask) + subpage_size;

        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_page_walk_skip_read(iova, iova_next);
            goto next;
        }

        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_page_walk_skip_reserve(iova, iova_next);
            goto next;
        }

        /* Permissions are stacked with parents' */
        read_cur = read && (slpte & VTD_SL_R);
        write_cur = write && (slpte & VTD_SL_W);

        /*
         * As long as we have either read/write permission, this is a
         * valid entry. The rule works for both page entries and page
         * directory entries.
         */
        entry_valid = read_cur | write_cur;

        if (!vtd_is_last_slpte(slpte, level) && entry_valid) {
            /*
             * This is a valid PDE (or even bigger than PDE). We need
             * to walk one further level.
             */
            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw),
                                      iova, MIN(iova_next, end), level - 1,
                                      read_cur, write_cur, info);
        } else {
            /*
             * This means we are either:
             *
             * (1) the real page entry (either 4K page, or huge page)
             * (2) the whole range is invalid
             *
             * In either case, we send an IOTLB notification down.
             */
            event.entry.target_as = &address_space_memory;
            event.entry.iova = iova & subpage_mask;
            event.entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
            event.entry.addr_mask = ~subpage_mask;
            /* NOTE: this is only meaningful if entry_valid == true */
            event.entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
            event.type = event.entry.perm ? IOMMU_NOTIFIER_MAP :
                                            IOMMU_NOTIFIER_UNMAP;
            ret = vtd_page_walk_one(&event, info);
        }

        if (ret < 0) {
            return ret;
        }

next:
        iova = iova_next;
    }

    return 0;
}
/**
 * vtd_page_walk - walk specific IOVA range, and call the hook
 *
 * @s: intel iommu state
 * @ce: context entry to walk upon
 * @start: IOVA address to start the walk
 * @end: IOVA range end address (start <= addr < end)
 * @info: page walking information struct
 */
static int vtd_page_walk(IntelIOMMUState *s, VTDContextEntry *ce,
                         uint64_t start, uint64_t end,
                         vtd_page_walk_info *info)
{
    dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce);
    uint32_t level = vtd_get_iova_level(s, ce);

    if (!vtd_iova_range_check(s, start, ce, info->aw)) {
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    if (!vtd_iova_range_check(s, end, ce, info->aw)) {
        /* Fix end so that it reaches the maximum */
        end = vtd_iova_limit(s, ce, info->aw);
    }

    return vtd_page_walk_level(addr, start, end, level, true, true, info);
}
static int vtd_root_entry_rsvd_bits_check(IntelIOMMUState *s,
                                          VTDRootEntry *re)
{
    /* Legacy Mode reserved bits check */
    if (!s->root_scalable &&
        (re->hi || (re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits))))
        goto rsvd_err;

    /* Scalable Mode reserved bits check */
    if (s->root_scalable &&
        ((re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)) ||
         (re->hi & VTD_ROOT_ENTRY_RSVD(s->aw_bits))))
        goto rsvd_err;

    return 0;

rsvd_err:
    error_report_once("%s: invalid root entry: hi=0x%"PRIx64
                      ", lo=0x%"PRIx64,
                      __func__, re->hi, re->lo);
    return -VTD_FR_ROOT_ENTRY_RSVD;
}

static inline int vtd_context_entry_rsvd_bits_check(IntelIOMMUState *s,
                                                    VTDContextEntry *ce)
{
    if (!s->root_scalable &&
        (ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI ||
         ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) {
        error_report_once("%s: invalid context entry: hi=%"PRIx64
                          ", lo=%"PRIx64" (reserved nonzero)",
                          __func__, ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }

    if (s->root_scalable &&
        (ce->val[0] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(s->aw_bits) ||
         ce->val[1] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 ||
         ce->val[2] ||
         ce->val[3])) {
        error_report_once("%s: invalid context entry: val[3]=%"PRIx64
                          ", val[2]=%"PRIx64
                          ", val[1]=%"PRIx64
                          ", val[0]=%"PRIx64" (reserved nonzero)",
                          __func__, ce->val[3], ce->val[2],
                          ce->val[1], ce->val[0]);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }

    return 0;
}
static int vtd_ce_rid2pasid_check(IntelIOMMUState *s,
                                  VTDContextEntry *ce)
{
    VTDPASIDEntry pe;

    /*
     * Make sure in Scalable Mode, a present context entry
     * has valid rid2pasid setting, which includes valid
     * rid2pasid field and corresponding pasid entry setting
     */
    return vtd_ce_get_rid2pasid_entry(s, ce, &pe);
}

/* Map a device to its corresponding domain (context-entry) */
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
                                    uint8_t devfn, VTDContextEntry *ce)
{
    VTDRootEntry re;
    int ret_fr;
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    ret_fr = vtd_get_root_entry(s, bus_num, &re);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_root_entry_present(s, &re, devfn)) {
        /* Not error - it's okay we don't have root entry. */
        trace_vtd_re_not_present(bus_num);
        return -VTD_FR_ROOT_ENTRY_P;
    }

    ret_fr = vtd_root_entry_rsvd_bits_check(s, &re);
    if (ret_fr) {
        return ret_fr;
    }

    ret_fr = vtd_get_context_entry_from_root(s, &re, devfn, ce);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_ce_present(ce)) {
        /* Not error - it's okay we don't have context entry. */
        trace_vtd_ce_not_present(bus_num, devfn);
        return -VTD_FR_CONTEXT_ENTRY_P;
    }

    ret_fr = vtd_context_entry_rsvd_bits_check(s, ce);
    if (ret_fr) {
        return ret_fr;
    }

    /* Check if the programming of context-entry is valid */
    if (!s->root_scalable &&
        !vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
        error_report_once("%s: invalid context entry: hi=%"PRIx64
                          ", lo=%"PRIx64" (level %d not supported)",
                          __func__, ce->hi, ce->lo,
                          vtd_ce_get_level(ce));
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    if (!s->root_scalable) {
        /* Do translation type check */
        if (!vtd_ce_type_check(x86_iommu, ce)) {
            /* Errors dumped in vtd_ce_type_check() */
            return -VTD_FR_CONTEXT_ENTRY_INV;
        }
    } else {
        /*
         * Check that the programming of context-entry.rid2pasid
         * and the corresponding pasid setting is valid, which
         * avoids having to check the pasid entry fetching result
         * in later helper calls.
         */
        ret_fr = vtd_ce_rid2pasid_check(s, ce);
        if (ret_fr) {
            return ret_fr;
        }
    }

    return 0;
}
static int vtd_sync_shadow_page_hook(IOMMUTLBEvent *event,
                                     void *private)
{
    memory_region_notify_iommu(private, 0, *event);
    return 0;
}

static uint16_t vtd_get_domain_id(IntelIOMMUState *s,
                                  VTDContextEntry *ce)
{
    VTDPASIDEntry pe;

    if (s->root_scalable) {
        vtd_ce_get_rid2pasid_entry(s, ce, &pe);
        return VTD_SM_PASID_ENTRY_DID(pe.val[1]);
    }

    return VTD_CONTEXT_ENTRY_DID(ce->hi);
}

static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as,
                                            VTDContextEntry *ce,
                                            hwaddr addr, hwaddr size)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    vtd_page_walk_info info = {
        .hook_fn = vtd_sync_shadow_page_hook,
        .private = (void *)&vtd_as->iommu,
        .notify_unmap = true,
        .aw = s->aw_bits,
        .as = vtd_as,
        .domain_id = vtd_get_domain_id(s, ce),
    };

    return vtd_page_walk(s, ce, addr, addr + size, &info);
}
static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as)
{
    int ret;
    VTDContextEntry ce;
    IOMMUNotifier *n;

    if (!(vtd_as->iommu.iommu_notify_flags & IOMMU_NOTIFIER_IOTLB_EVENTS)) {
        return 0;
    }

    ret = vtd_dev_to_context_entry(vtd_as->iommu_state,
                                   pci_bus_num(vtd_as->bus),
                                   vtd_as->devfn, &ce);
    if (ret) {
        if (ret == -VTD_FR_CONTEXT_ENTRY_P) {
            /*
             * It's a valid scenario to have a context entry that is
             * not present.  For example, when a device is removed
             * from an existing domain then the context entry will be
             * zeroed by the guest before it was put into another
             * domain.  When this happens, instead of synchronizing
             * the shadow pages we should invalidate all existing
             * mappings and notify the backends.
             */
            IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
                vtd_address_space_unmap(vtd_as, n);
            }
            ret = 0;
        }
        return ret;
    }

    return vtd_sync_shadow_page_table_range(vtd_as, &ce, 0, UINT64_MAX);
}
/*
 * Check if the specific device is configured to bypass address
 * translation for DMA requests. In Scalable Mode, whether the
 * 1st-level or the 2nd-level translation is bypassed depends
 * on the PGTT setting.
 */
static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
{
    IntelIOMMUState *s;
    VTDContextEntry ce;
    VTDPASIDEntry pe;
    int ret;

    assert(as);

    s = as->iommu_state;
    ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus),
                                   as->devfn, &ce);
    if (ret) {
        /*
         * Possibly failed to parse the context entry for some reason
         * (e.g., during init, or any guest configuration errors on
         * context entries). We should assume PT not enabled for
         * safety.
         */
        return false;
    }

    if (s->root_scalable) {
        ret = vtd_ce_get_rid2pasid_entry(s, &ce, &pe);
        if (ret) {
            error_report_once("%s: vtd_ce_get_rid2pasid_entry error: %"PRId32,
                              __func__, ret);
            return false;
        }
        return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT);
    }

    return (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH);
}
/* Return whether the device is using IOMMU translation. */
static bool vtd_switch_address_space(VTDAddressSpace *as)
{
    bool use_iommu;
    /* Whether we need to take the BQL on our own */
    bool take_bql = !qemu_mutex_iothread_locked();

    assert(as);

    use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as);

    trace_vtd_switch_address_space(pci_bus_num(as->bus),
                                   VTD_PCI_SLOT(as->devfn),
                                   VTD_PCI_FUNC(as->devfn),
                                   use_iommu);

    /*
     * It's possible that we reach here without BQL, e.g., when called
     * from vtd_pt_enable_fast_path(). However the memory APIs need
     * it. We'd better make sure we have had it already, or, take it.
     */
    if (take_bql) {
        qemu_mutex_lock_iothread();
    }

    /* Turn off first then on the other */
    if (use_iommu) {
        memory_region_set_enabled(&as->nodmar, false);
        memory_region_set_enabled(MEMORY_REGION(&as->iommu), true);
    } else {
        memory_region_set_enabled(MEMORY_REGION(&as->iommu), false);
        memory_region_set_enabled(&as->nodmar, true);
    }

    if (take_bql) {
        qemu_mutex_unlock_iothread();
    }

    return use_iommu;
}

static void vtd_switch_address_space_all(IntelIOMMUState *s)
{
    GHashTableIter iter;
    VTDBus *vtd_bus;
    int i;

    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
        for (i = 0; i < PCI_DEVFN_MAX; i++) {
            if (!vtd_bus->dev_as[i]) {
                continue;
            }
            vtd_switch_address_space(vtd_bus->dev_as[i]);
        }
    }
}
static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
{
    return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
}
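
/*
 * The source id follows the PCI requester-id layout: bus number in bits
 * 15:8, device/function in bits 7:0. For example, device 03:00.1 (bus 3,
 * slot 0, function 1) yields source_id 0x0301.
 */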
static const bool vtd_qualified_faults[] = {
    [VTD_FR_RESERVED] = false,
    [VTD_FR_ROOT_ENTRY_P] = false,
    [VTD_FR_CONTEXT_ENTRY_P] = true,
    [VTD_FR_CONTEXT_ENTRY_INV] = true,
    [VTD_FR_ADDR_BEYOND_MGAW] = true,
    [VTD_FR_WRITE] = true,
    [VTD_FR_READ] = true,
    [VTD_FR_PAGING_ENTRY_INV] = true,
    [VTD_FR_ROOT_TABLE_INV] = false,
    [VTD_FR_CONTEXT_TABLE_INV] = false,
    [VTD_FR_ROOT_ENTRY_RSVD] = false,
    [VTD_FR_PAGING_ENTRY_RSVD] = true,
    [VTD_FR_CONTEXT_ENTRY_TT] = true,
    [VTD_FR_PASID_TABLE_INV] = false,
    [VTD_FR_RESERVED_ERR] = false,
    [VTD_FR_MAX] = false,
};

/* To see if a fault condition is "qualified", which is reported to software
 * only if the FPD field in the context-entry used to process the faulting
 * request is 0.
 */
static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
{
    return vtd_qualified_faults[fault];
}
static inline bool vtd_is_interrupt_addr(hwaddr addr)
{
    return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
}
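
/*
 * This matches the architected interrupt window at 0xfee00000-0xfeefffff
 * (VTD_INTERRUPT_ADDR_FIRST/LAST): DMA writes into it are message
 * signalled interrupts rather than memory accesses, so they are handled
 * by a dedicated memory region instead of the translation path below.
 */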
static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
{
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    bool success = false;

    vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
    if (!vtd_bus) {
        goto out;
    }

    vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)];
    if (!vtd_as) {
        goto out;
    }

    if (vtd_switch_address_space(vtd_as) == false) {
        /* We switched off IOMMU region successfully. */
        success = true;
    }

out:
    trace_vtd_pt_enable_fast_path(source_id, success);
}
/* Map dev to context-entry then do a paging-structures walk to do an IOMMU
 * translation.
 *
 * Called from RCU critical section.
 *
 * @bus_num: The bus number
 * @devfn: The devfn, which is the combination of device and function number
 * @is_write: The access is a write operation
 * @entry: IOMMUTLBEntry that contains the addr to be translated and result
 *
 * Returns true if translation is successful, otherwise false.
 */
static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
                                   uint8_t devfn, hwaddr addr, bool is_write,
                                   IOMMUTLBEntry *entry)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    VTDContextEntry ce;
    uint8_t bus_num = pci_bus_num(bus);
    VTDContextCacheEntry *cc_entry;
    uint64_t slpte, page_mask;
    uint32_t level;
    uint16_t source_id = vtd_make_source_id(bus_num, devfn);
    int ret_fr;
    bool is_fpd_set = false;
    bool reads = true;
    bool writes = true;
    uint8_t access_flags;
    VTDIOTLBEntry *iotlb_entry;

    /*
     * We have standalone memory region for interrupt addresses, we
     * should never receive translation requests in this region.
     */
    assert(!vtd_is_interrupt_addr(addr));

    vtd_iommu_lock(s);

    cc_entry = &vtd_as->context_cache_entry;

    /* Try to fetch slpte from IOTLB */
    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
    if (iotlb_entry) {
        trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
                                 iotlb_entry->domain_id);
        slpte = iotlb_entry->slpte;
        access_flags = iotlb_entry->access_flags;
        page_mask = iotlb_entry->mask;
        goto out;
    }

    /* Try to fetch context-entry from cache first */
    if (cc_entry->context_cache_gen == s->context_cache_gen) {
        trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
                               cc_entry->context_entry.lo,
                               cc_entry->context_cache_gen);
        ce = cc_entry->context_entry;
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (!is_fpd_set && s->root_scalable) {
            ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set);
            VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write);
        }
    } else {
        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (!ret_fr && !is_fpd_set && s->root_scalable) {
            ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set);
        }
        VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write);
        /* Update context-cache */
        trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
                                  cc_entry->context_cache_gen,
                                  s->context_cache_gen);
        cc_entry->context_entry = ce;
        cc_entry->context_cache_gen = s->context_cache_gen;
    }

    /*
     * We don't need to translate for pass-through context entries.
     * Also, let's ignore IOTLB caching as well for PT devices.
     */
    if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
        entry->iova = addr & VTD_PAGE_MASK_4K;
        entry->translated_addr = entry->iova;
        entry->addr_mask = ~VTD_PAGE_MASK_4K;
        entry->perm = IOMMU_RW;
        trace_vtd_translate_pt(source_id, entry->iova);

        /*
         * When this happens, it means firstly caching-mode is not
         * enabled, and this is the first passthrough translation for
         * the device. Let's enable the fast path for passthrough.
         *
         * When passthrough is disabled again for the device, we can
         * capture it via the context entry invalidation, then the
         * IOMMU region can be swapped back.
         */
        vtd_pt_enable_fast_path(s, source_id);
        vtd_iommu_unlock(s);
        return true;
    }

    ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &slpte, &level,
                               &reads, &writes, s->aw_bits);
    VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write);

    page_mask = vtd_slpt_level_page_mask(level);
    access_flags = IOMMU_ACCESS_FLAG(reads, writes);
    vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce), addr, slpte,
                     access_flags, level);
out:
    vtd_iommu_unlock(s);
    entry->iova = addr & page_mask;
    entry->translated_addr = vtd_get_slpte_addr(slpte, s->aw_bits) & page_mask;
    entry->addr_mask = ~page_mask;
    entry->perm = access_flags;
    return true;

error:
    vtd_iommu_unlock(s);
    entry->iova = 0;
    entry->translated_addr = 0;
    entry->addr_mask = 0;
    entry->perm = IOMMU_NONE;
    return false;
}
*s
)
1802 s
->root
= vtd_get_quad_raw(s
, DMAR_RTADDR_REG
);
1803 s
->root
&= VTD_RTADDR_ADDR_MASK(s
->aw_bits
);
1805 vtd_update_scalable_state(s
);
1807 trace_vtd_reg_dmar_root(s
->root
, s
->root_scalable
);
1810 static void vtd_iec_notify_all(IntelIOMMUState
*s
, bool global
,
1811 uint32_t index
, uint32_t mask
)
1813 x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s
), global
, index
, mask
);
1816 static void vtd_interrupt_remap_table_setup(IntelIOMMUState
*s
)
1819 value
= vtd_get_quad_raw(s
, DMAR_IRTA_REG
);
1820 s
->intr_size
= 1UL << ((value
& VTD_IRTA_SIZE_MASK
) + 1);
1821 s
->intr_root
= value
& VTD_IRTA_ADDR_MASK(s
->aw_bits
);
1822 s
->intr_eime
= value
& VTD_IRTA_EIME
;
1824 /* Notify global invalidation */
1825 vtd_iec_notify_all(s
, true, 0, 0);
1827 trace_vtd_reg_ir_root(s
->intr_root
, s
->intr_size
);
static void vtd_iommu_replay_all(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;

    QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
        vtd_sync_shadow_page_table(vtd_as);
    }
}

static void vtd_context_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_cc_global();
    /* Protects context cache */
    vtd_iommu_lock(s);
    s->context_cache_gen++;
    if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
        vtd_reset_context_cache_locked(s);
    }
    vtd_iommu_unlock(s);
    vtd_address_space_refresh_all(s);
    /*
     * From VT-d spec 6.5.2.1, a global context entry invalidation
     * should be followed by a IOTLB global invalidation, so we should
     * be safe even without this. However, let's replay the region as
     * well to be safer, and go back here when we need finer tunes for
     * VT-d emulation codes.
     */
    vtd_iommu_replay_all(s);
}
/* Do a context-cache device-selective invalidation.
 * @func_mask: FM field after shifting
 */
static void vtd_context_device_invalidate(IntelIOMMUState *s,
                                          uint16_t source_id,
                                          uint16_t func_mask)
{
    uint16_t mask;
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    uint8_t bus_n, devfn;
    uint16_t devfn_it;

    trace_vtd_inv_desc_cc_devices(source_id, func_mask);

    switch (func_mask & 3) {
    case 0:
        mask = 0;   /* No bits in the SID field masked */
        break;
    case 1:
        mask = 4;   /* Mask bit 2 in the SID field */
        break;
    case 2:
        mask = 6;   /* Mask bit 2:1 in the SID field */
        break;
    case 3:
        mask = 7;   /* Mask bit 2:0 in the SID field */
        break;
    default:
        g_assert_not_reached();
    }
    mask = ~mask;

    bus_n = VTD_SID_TO_BUS(source_id);
    vtd_bus = vtd_find_as_from_bus_num(s, bus_n);
    if (vtd_bus) {
        devfn = VTD_SID_TO_DEVFN(source_id);
        for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
                trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
                                             VTD_PCI_FUNC(devfn_it));
                vtd_iommu_lock(s);
                vtd_as->context_cache_entry.context_cache_gen = 0;
                vtd_iommu_unlock(s);
                /*
                 * Do switch address space when needed, in case the
                 * device passthrough bit is switched.
                 */
                vtd_switch_address_space(vtd_as);
                /*
                 * So a device is moving out of (or moving into) a
                 * domain, resync the shadow page table.
                 * This is harmless even if we have no such notifier
                 * registered - the IOMMU notification framework will
                 * skip MAP notifications if that happened.
                 */
                vtd_sync_shadow_page_table(vtd_as);
            }
        }
    }
}
/* Context-cache invalidation
 * Returns the Context Actual Invalidation Granularity.
 * @val: the content of the CCMD_REG
 */
static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
{
    uint64_t caig;
    uint64_t type = val & VTD_CCMD_CIRG_MASK;

    switch (type) {
    case VTD_CCMD_DOMAIN_INVL:
        /* Fall through */
    case VTD_CCMD_GLOBAL_INVL:
        caig = VTD_CCMD_GLOBAL_INVL_A;
        vtd_context_global_invalidate(s);
        break;

    case VTD_CCMD_DEVICE_INVL:
        caig = VTD_CCMD_DEVICE_INVL_A;
        vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
        break;

    default:
        error_report_once("%s: invalid context: 0x%" PRIx64,
                          __func__, val);
        caig = 0;
    }
    return caig;
}

static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_iotlb_global();
    vtd_reset_iotlb(s);
    vtd_iommu_replay_all(s);
}
static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
{
    VTDContextEntry ce;
    VTDAddressSpace *vtd_as;

    trace_vtd_inv_desc_iotlb_domain(domain_id);

    vtd_iommu_lock(s);
    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
                                &domain_id);
    vtd_iommu_unlock(s);

    QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
        if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                      vtd_as->devfn, &ce) &&
            domain_id == vtd_get_domain_id(s, &ce)) {
            vtd_sync_shadow_page_table(vtd_as);
        }
    }
}

static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
                                             uint16_t domain_id, hwaddr addr,
                                             uint8_t am)
{
    VTDAddressSpace *vtd_as;
    VTDContextEntry ce;
    int ret;
    hwaddr size = (1 << am) * VTD_PAGE_SIZE;

    QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) {
        ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                       vtd_as->devfn, &ce);
        if (!ret && domain_id == vtd_get_domain_id(s, &ce)) {
            if (vtd_as_has_map_notifier(vtd_as)) {
                /*
                 * As long as we have MAP notifications registered in
                 * any of our IOMMU notifiers, we need to sync the
                 * shadow page table.
                 */
                vtd_sync_shadow_page_table_range(vtd_as, &ce, addr, size);
            } else {
                /*
                 * For UNMAP-only notifiers, we don't need to walk the
                 * page tables.  We just deliver the PSI down to
                 * invalidate caches.
                 */
                IOMMUTLBEvent event = {
                    .type = IOMMU_NOTIFIER_UNMAP,
                    .entry = {
                        .target_as = &address_space_memory,
                        .iova = addr,
                        .translated_addr = 0,
                        .addr_mask = size - 1,
                        .perm = IOMMU_NONE,
                    },
                };
                memory_region_notify_iommu(&vtd_as->iommu, 0, event);
            }
        }
    }
}
static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
                                      hwaddr addr, uint8_t am)
{
    VTDIOTLBPageInvInfo info;

    trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);

    assert(am <= VTD_MAMV);
    info.domain_id = domain_id;
    info.addr = addr;
    info.mask = ~((1 << am) - 1);
    vtd_iommu_lock(s);
    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
    vtd_iommu_unlock(s);
    vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
}
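
/*
 * Example of the address-mask convention (am as defined by the VT-d PSI
 * descriptor): am == 2 invalidates a 4-page naturally aligned region, so
 * info.mask becomes ~0x3ULL and vtd_hash_remove_by_page() drops every
 * cached entry whose gfn falls inside addr >> 12 .. (addr >> 12) + 3.
 */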
/* Flush IOTLB
 * Returns the IOTLB Actual Invalidation Granularity.
 * @val: the content of the IOTLB_REG
 */
static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
{
    uint64_t iaig;
    uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
    uint16_t domain_id;
    hwaddr addr;
    uint8_t am;

    switch (type) {
    case VTD_TLB_GLOBAL_FLUSH:
        iaig = VTD_TLB_GLOBAL_FLUSH_A;
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_TLB_DSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        iaig = VTD_TLB_DSI_FLUSH_A;
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_TLB_PSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
        am = VTD_IVA_AM(addr);
        addr = VTD_IVA_ADDR(addr);
        if (am > VTD_MAMV) {
            error_report_once("%s: address mask overflow: 0x%" PRIx64,
                              __func__, vtd_get_quad_raw(s, DMAR_IVA_REG));
            iaig = 0;
            break;
        }
        iaig = VTD_TLB_PSI_FLUSH_A;
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        error_report_once("%s: invalid granularity: 0x%" PRIx64,
                          __func__, val);
        iaig = 0;
    }
    return iaig;
}
static void vtd_fetch_inv_desc(IntelIOMMUState *s);

static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
{
    return s->qi_enabled && (s->iq_tail == s->iq_head) &&
           (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
}

static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
{
    uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);

    trace_vtd_inv_qi_enable(en);

    if (en) {
        s->iq = iqa_val & VTD_IQA_IQA_MASK(s->aw_bits);
        /* 2^(x+8) entries */
        s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8 - (s->iq_dw ? 1 : 0));
        s->qi_enabled = true;
        trace_vtd_inv_qi_setup(s->iq, s->iq_size);
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);

        if (s->iq_tail != 0) {
            /*
             * This is a spec violation but Windows guests are known to set up
             * Queued Invalidation this way so we allow the write and process
             * Invalidation Descriptors right away.
             */
            trace_vtd_warn_invalid_qi_tail(s->iq_tail);
            if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
                vtd_fetch_inv_desc(s);
            }
        }
    } else {
        if (vtd_queued_inv_disable_check(s)) {
            /* disable Queued Invalidation */
            vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
            s->iq_head = 0;
            s->qi_enabled = false;
            /* Ok - report back to driver */
            vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
        } else {
            error_report_once("%s: detected improper state when disable QI "
                              "(head=0x%x, tail=0x%x, last_type=%d)",
                              __func__,
                              s->iq_head, s->iq_tail, s->iq_last_desc_type);
        }
    }
}
/* Set Root Table Pointer */
static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
{
    vtd_root_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
    vtd_reset_caches(s);
    vtd_address_space_refresh_all(s);
}

/* Set Interrupt Remap Table Pointer */
static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
{
    vtd_interrupt_remap_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
}

/* Handle Translation Enable/Disable */
static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
{
    if (s->dmar_enabled == en) {
        return;
    }

    trace_vtd_dmar_enable(en);

    if (en) {
        s->dmar_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
    } else {
        s->dmar_enabled = false;

        /* Clear the index of Fault Recording Register */
        s->next_frcd_reg = 0;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
    }

    vtd_reset_caches(s);
    vtd_address_space_refresh_all(s);
}

/* Handle Interrupt Remap Enable/Disable */
static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
{
    trace_vtd_ir_enable(en);

    if (en) {
        s->intr_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
    } else {
        s->intr_enabled = false;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
    }
}
/* Handle write to Global Command Register */
static void vtd_handle_gcmd_write(IntelIOMMUState *s)
{
    uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
    uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
    uint32_t changed = status ^ val;

    trace_vtd_reg_write_gcmd(status, val);
    if (changed & VTD_GCMD_TE) {
        /* Translation enable/disable */
        vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
    }
    if (val & VTD_GCMD_SRTP) {
        /* Set/update the root-table pointer */
        vtd_handle_gcmd_srtp(s);
    }
    if (changed & VTD_GCMD_QIE) {
        /* Queued Invalidation Enable */
        vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
    }
    if (val & VTD_GCMD_SIRTP) {
        /* Set/update the interrupt remapping root-table pointer */
        vtd_handle_gcmd_sirtp(s);
    }
    if (changed & VTD_GCMD_IRE) {
        /* Interrupt remap enable/disable */
        vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
    }
}
/* Handle write to Context Command Register */
static void vtd_handle_ccmd_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);

    /* Context-cache invalidation request */
    if (val & VTD_CCMD_ICC) {
        if (s->qi_enabled) {
            error_report_once("Queued Invalidation enabled, "
                              "should not use register-based invalidation");
            return;
        }
        ret = vtd_context_cache_invalidate(s, val);
        /* Invalidation completed. Change something to show */
        vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
                                      ret);
    }
}
/* Handle write to IOTLB Invalidation Register */
static void vtd_handle_iotlb_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);

    /* IOTLB invalidation request */
    if (val & VTD_TLB_IVT) {
        if (s->qi_enabled) {
            error_report_once("Queued Invalidation enabled, "
                              "should not use register-based invalidation");
            return;
        }
        ret = vtd_iotlb_flush(s, val);
        /* Invalidation completed. Change something to show */
        vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
                                      VTD_TLB_FLUSH_GRANU_MASK_A, ret);
    }
}
/* Fetch an Invalidation Descriptor from the Invalidation Queue */
static bool vtd_get_inv_desc(IntelIOMMUState *s,
                             VTDInvDesc *inv_desc)
{
    dma_addr_t base_addr = s->iq;
    uint32_t offset = s->iq_head;
    uint32_t dw = s->iq_dw ? 32 : 16;
    dma_addr_t addr = base_addr + offset * dw;

    if (dma_memory_read(&address_space_memory, addr, inv_desc, dw)) {
        error_report_once("Read INV DESC failed.");
        return false;
    }
    inv_desc->lo = le64_to_cpu(inv_desc->lo);
    inv_desc->hi = le64_to_cpu(inv_desc->hi);
    if (dw == 32) {
        inv_desc->val[2] = le64_to_cpu(inv_desc->val[2]);
        inv_desc->val[3] = le64_to_cpu(inv_desc->val[3]);
    }
    return true;
}
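/*
 * An Invalidation Wait Descriptor signals completion of all previous
 * descriptors either by a Status Write (SW set: store a 32-bit value
 * to a guest-supplied address) or by raising a completion event (IF
 * set); a descriptor with neither bit set is treated as invalid here.
 */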
static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
        (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
        error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
                          " (reserved nonzero)", __func__, inv_desc->hi,
                          inv_desc->lo);
        return false;
    }

    if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
        /* Status Write */
        uint32_t status_data = (uint32_t)(inv_desc->lo >>
                                          VTD_INV_DESC_WAIT_DATA_SHIFT);

        assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));

        /* FIXME: need to be masked with HAW? */
        dma_addr_t status_addr = inv_desc->hi;
        trace_vtd_inv_desc_wait_sw(status_addr, status_data);
        status_data = cpu_to_le32(status_data);
        if (dma_memory_write(&address_space_memory, status_addr, &status_data,
                             sizeof(status_data))) {
            trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
            return false;
        }
    } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
        /* Interrupt flag */
        vtd_generate_completion_event(s);
    } else {
        error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
                          " (unknown type)", __func__, inv_desc->hi,
                          inv_desc->lo);
        return false;
    }
    return true;
}
static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
                                           VTDInvDesc *inv_desc)
{
    uint16_t sid, fmask;

    if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
        error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64
                          " (reserved nonzero)", __func__, inv_desc->hi,
                          inv_desc->lo);
        return false;
    }
    switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
    case VTD_INV_DESC_CC_DOMAIN:
        trace_vtd_inv_desc_cc_domain(
            (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
        /* Fall through */
    case VTD_INV_DESC_CC_GLOBAL:
        vtd_context_global_invalidate(s);
        break;

    case VTD_INV_DESC_CC_DEVICE:
        sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
        fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
        vtd_context_device_invalidate(s, sid, fmask);
        break;

    default:
        error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64
                          " (invalid type)", __func__, inv_desc->hi,
                          inv_desc->lo);
        return false;
    }
    return true;
}
static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    uint16_t domain_id;
    uint8_t am;
    hwaddr addr;

    if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
        error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
                          ", lo=0x%"PRIx64" (reserved bits nonzero)",
                          __func__, inv_desc->hi, inv_desc->lo);
        return false;
    }

    switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
    case VTD_INV_DESC_IOTLB_GLOBAL:
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_INV_DESC_IOTLB_DOMAIN:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_INV_DESC_IOTLB_PAGE:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
        am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
        if (am > VTD_MAMV) {
            error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
                              ", lo=0x%"PRIx64" (am=%u > VTD_MAMV=%u)",
                              __func__, inv_desc->hi, inv_desc->lo,
                              am, (unsigned)VTD_MAMV);
            return false;
        }
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
                          ", lo=0x%"PRIx64" (type mismatch: 0x%llx)",
                          __func__, inv_desc->hi, inv_desc->lo,
                          inv_desc->lo & VTD_INV_DESC_IOTLB_G);
        return false;
    }
    return true;
}
static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
                                     VTDInvDesc *inv_desc)
{
    trace_vtd_inv_desc_iec(inv_desc->iec.granularity,
                           inv_desc->iec.index,
                           inv_desc->iec.index_mask);

    vtd_iec_notify_all(s, !inv_desc->iec.granularity,
                       inv_desc->iec.index,
                       inv_desc->iec.index_mask);
    return true;
}
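/*
 * For the ATS size encoding used below, the number of trailing 1-bits
 * in the page-shifted address (counted by cto64()) selects the range
 * size: e.g. bits 15:12 = x011 gives two trailing ones, so
 * sz = (4K * 2) << 2 = 32K, matching table 2.4 of the ATS spec.
 */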
static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
                                          VTDInvDesc *inv_desc)
{
    VTDAddressSpace *vtd_dev_as;
    IOMMUTLBEvent event;
    struct VTDBus *vtd_bus;
    hwaddr addr;
    uint64_t sz;
    uint16_t sid;
    uint8_t devfn;
    bool size;
    uint8_t bus_num;

    addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
    sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
    devfn = sid & 0xff;
    bus_num = sid >> 8;
    size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);

    if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
        error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64
                          ", lo=%"PRIx64" (reserved nonzero)", __func__,
                          inv_desc->hi, inv_desc->lo);
        return false;
    }

    vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
    if (!vtd_bus) {
        goto done;
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];
    if (!vtd_dev_as) {
        goto done;
    }

    /* According to ATS spec table 2.4:
     * S = 0, bits 15:12 = xxxx     range size: 4K
     * S = 1, bits 15:12 = xxx0     range size: 8K
     * S = 1, bits 15:12 = xx01     range size: 16K
     * S = 1, bits 15:12 = x011     range size: 32K
     * S = 1, bits 15:12 = 0111     range size: 64K
     * ...
     */
    if (size) {
        sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
        addr &= ~(sz - 1);
    } else {
        sz = VTD_PAGE_SIZE;
    }

    event.type = IOMMU_NOTIFIER_DEVIOTLB_UNMAP;
    event.entry.target_as = &vtd_dev_as->as;
    event.entry.addr_mask = sz - 1;
    event.entry.iova = addr;
    event.entry.perm = IOMMU_NONE;
    event.entry.translated_addr = 0;
    memory_region_notify_iommu(&vtd_dev_as->iommu, 0, event);

done:
    return true;
}
static bool vtd_process_inv_desc(IntelIOMMUState *s)
{
    VTDInvDesc inv_desc;
    uint8_t desc_type;

    trace_vtd_inv_qi_head(s->iq_head);
    if (!vtd_get_inv_desc(s, &inv_desc)) {
        s->iq_last_desc_type = VTD_INV_DESC_NONE;
        return false;
    }

    desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
    /* FIXME: should update at first or at last? */
    s->iq_last_desc_type = desc_type;

    switch (desc_type) {
    case VTD_INV_DESC_CC:
        trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_context_cache_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IOTLB:
        trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    /*
     * TODO: the bodies of the two cases below will be implemented in a
     * future series. To keep guests whose iommu driver includes the
     * scalable mode support patch set working, returning true is
     * enough for now.
     */
    case VTD_INV_DESC_PC:
        break;

    case VTD_INV_DESC_PIOTLB:
        break;

    case VTD_INV_DESC_WAIT:
        trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_wait_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IEC:
        trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_DEVICE:
        trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    default:
        error_report_once("%s: invalid inv desc: hi=%"PRIx64", lo=%"PRIx64
                          " (unknown type)", __func__, inv_desc.hi,
                          inv_desc.lo);
        return false;
    }
    s->iq_head++;
    if (s->iq_head == s->iq_size) {
        s->iq_head = 0;
    }
    return true;
}
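/*
 * DMAR_IQH_REG holds the head as a byte offset into the queue, so the
 * descriptor index is shifted by 4 for 16-byte (128-bit) descriptors
 * and by 5 for 32-byte (256-bit) ones before being written back.
 */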
/* Try to fetch and process more Invalidation Descriptors */
static void vtd_fetch_inv_desc(IntelIOMMUState *s)
{
    int qi_shift;

    /* Refer to 10.4.23 of VT-d spec 3.0 */
    qi_shift = s->iq_dw ? VTD_IQH_QH_SHIFT_5 : VTD_IQH_QH_SHIFT_4;

    trace_vtd_inv_qi_fetch();

    if (s->iq_tail >= s->iq_size) {
        /* Detects an invalid Tail pointer */
        error_report_once("%s: detected invalid QI tail "
                          "(tail=0x%x, size=0x%x)",
                          __func__, s->iq_tail, s->iq_size);
        vtd_handle_inv_queue_error(s);
        return;
    }
    while (s->iq_head != s->iq_tail) {
        if (!vtd_process_inv_desc(s)) {
            /* Invalidation Queue Errors */
            vtd_handle_inv_queue_error(s);
            break;
        }
        /* Must update the IQH_REG in time */
        vtd_set_quad_raw(s, DMAR_IQH_REG,
                         (((uint64_t)(s->iq_head)) << qi_shift) &
                         VTD_IQH_QH_MASK);
    }
}
/* Handle write to Invalidation Queue Tail Register */
static void vtd_handle_iqt_write(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);

    if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) {
        error_report_once("%s: RSV bit is set: val=0x%"PRIx64,
                          __func__, val);
        return;
    }
    s->iq_tail = VTD_IQT_QT(s->iq_dw, val);
    trace_vtd_inv_qi_tail(s->iq_tail);

    if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
        /* Process Invalidation Queue here */
        vtd_fetch_inv_desc(s);
    }
}
static void vtd_handle_fsts_write(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
    uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;

    if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        trace_vtd_fsts_clear_ip();
    }
    /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
     * Descriptors if there are any when Queued Invalidation is enabled?
     */
}
static void vtd_handle_fectl_write(IntelIOMMUState *s)
{
    uint32_t fectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);

    trace_vtd_reg_write_fectl(fectl_reg);

    if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}
static void vtd_handle_ics_write(IntelIOMMUState *s)
{
    uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
    uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
        trace_vtd_reg_ics_clear_ip();
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}
static void vtd_handle_iectl_write(IntelIOMMUState *s)
{
    uint32_t iectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    trace_vtd_reg_write_iectl(iectl_reg);

    if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}
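/*
 * Guests may access the 64-bit registers either as a single 8-byte
 * access or as two 4-byte halves; the *_HI cases below serve the
 * upper half of such split accesses.
 */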
static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    IntelIOMMUState *s = opaque;
    uint64_t val;

    trace_vtd_reg_read(addr, size);

    if (addr + size > DMAR_REG_SIZE) {
        error_report_once("%s: MMIO over range: addr=0x%" PRIx64
                          " size=0x%x", __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (addr) {
    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        val = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        val = vtd_get_quad_raw(s, DMAR_RTADDR_REG) >> 32;
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        val = s->iq >> 32;
        break;

    default:
        if (size == 4) {
            val = vtd_get_long(s, addr);
        } else {
            val = vtd_get_quad(s, addr);
        }
    }

    return val;
}
static void vtd_mem_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    IntelIOMMUState *s = opaque;

    trace_vtd_reg_write(addr, size, val);

    if (addr + size > DMAR_REG_SIZE) {
        error_report_once("%s: MMIO over range: addr=0x%" PRIx64
                          " size=0x%x", __func__, addr, size);
        return;
    }

    switch (addr) {
    /* Global Command Register, 32-bit */
    case DMAR_GCMD_REG:
        vtd_set_long(s, addr, val);
        vtd_handle_gcmd_write(s);
        break;

    /* Context Command Register, 64-bit */
    case DMAR_CCMD_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_ccmd_write(s);
        }
        break;

    case DMAR_CCMD_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ccmd_write(s);
        break;

    /* IOTLB Invalidation Register, 64-bit */
    case DMAR_IOTLB_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_iotlb_write(s);
        }
        break;

    case DMAR_IOTLB_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iotlb_write(s);
        break;

    /* Invalidate Address Register, 64-bit */
    case DMAR_IVA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IVA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Status Register, 32-bit */
    case DMAR_FSTS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fsts_write(s);
        break;

    /* Fault Event Control Register, 32-bit */
    case DMAR_FECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fectl_write(s);
        break;

    /* Fault Event Data Register, 32-bit */
    case DMAR_FEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Address Register, 32-bit */
    case DMAR_FEADDR_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            /*
             * While the register is 32-bit only, some guests (Xen...) write to
             * it with 64-bit.
             */
            vtd_set_quad(s, addr, val);
        }
        break;

    /* Fault Event Upper Address Register, 32-bit */
    case DMAR_FEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Protected Memory Enable Register, 32-bit */
    case DMAR_PMEN_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Queue Tail Register, 64-bit */
    case DMAR_IQT_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        vtd_handle_iqt_write(s);
        break;

    case DMAR_IQT_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* 19:63 of IQT_REG is RsvdZ, do nothing here */
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        if (s->ecap & VTD_ECAP_SMTS &&
            val & VTD_IQA_DW_MASK) {
            s->iq_dw = true;
        } else {
            s->iq_dw = false;
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Completion Status Register, 32-bit */
    case DMAR_ICS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ics_write(s);
        break;

    /* Invalidation Event Control Register, 32-bit */
    case DMAR_IECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iectl_write(s);
        break;

    /* Invalidation Event Data Register, 32-bit */
    case DMAR_IEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Address Register, 32-bit */
    case DMAR_IEADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Upper Address Register, 32-bit */
    case DMAR_IEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Recording Registers, 128-bit */
    case DMAR_FRCD_REG_0_0:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_FRCD_REG_0_1:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    case DMAR_FRCD_REG_0_2:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            /* May clear bit 127 (Fault), update PPF */
            vtd_update_fsts_ppf(s);
        }
        break;

    case DMAR_FRCD_REG_0_3:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* May clear bit 127 (Fault), update PPF */
        vtd_update_fsts_ppf(s);
        break;

    case DMAR_IRTA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IRTA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    default:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
    }
}
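/*
 * Note that when DMAR is disabled the translate callback below does
 * not fail: it returns an identity mapping at 4K granularity, so
 * devices keep working before the guest enables translation.
 */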
static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                         IOMMUAccessFlags flag, int iommu_idx)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IOMMUTLBEntry iotlb = {
        /* We'll fill in the rest later. */
        .target_as = &address_space_memory,
    };
    bool success;

    if (likely(s->dmar_enabled)) {
        success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
                                         addr, flag & IOMMU_WO, &iotlb);
    } else {
        /* DMAR disabled, passthrough, use 4k-page */
        iotlb.iova = addr & VTD_PAGE_MASK_4K;
        iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
        iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
        iotlb.perm = IOMMU_RW;
        success = true;
    }

    if (likely(success)) {
        trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus),
                                 VTD_PCI_SLOT(vtd_as->devfn),
                                 VTD_PCI_FUNC(vtd_as->devfn),
                                 iotlb.iova, iotlb.translated_addr,
                                 iotlb.addr_mask);
    } else {
        error_report_once("%s: detected translation failure "
                          "(dev=%02x:%02x:%02x, iova=0x%" PRIx64 ")",
                          __func__, pci_bus_num(vtd_as->bus),
                          VTD_PCI_SLOT(vtd_as->devfn),
                          VTD_PCI_FUNC(vtd_as->devfn),
                          addr);
    }

    return iotlb;
}
static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                         IOMMUNotifierFlag old,
                                         IOMMUNotifierFlag new,
                                         Error **errp)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;

    /* Update per-address-space notifier flags */
    vtd_as->notifier_flags = new;

    if (old == IOMMU_NOTIFIER_NONE) {
        QLIST_INSERT_HEAD(&s->vtd_as_with_notifiers, vtd_as, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        QLIST_REMOVE(vtd_as, next);
    }
    return 0;
}
static int vtd_post_load(void *opaque, int version_id)
{
    IntelIOMMUState *iommu = opaque;

    /*
     * Memory regions are dynamically turned on/off depending on
     * context entry configurations from the guest. After migration,
     * we need to make sure the memory regions are still correct.
     */
    vtd_switch_address_space_all(iommu);

    /*
     * We don't need to migrate the root_scalable flag because we can
     * simply recalculate it after loading completes. We could do the
     * same for root, dmar_enabled, etc., but since those are already
     * migrated we keep them for migration compatibility.
     */
    vtd_update_scalable_state(iommu);

    return 0;
}
static const VMStateDescription vtd_vmstate = {
    .name = "iommu-intel",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .post_load = vtd_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(root, IntelIOMMUState),
        VMSTATE_UINT64(intr_root, IntelIOMMUState),
        VMSTATE_UINT64(iq, IntelIOMMUState),
        VMSTATE_UINT32(intr_size, IntelIOMMUState),
        VMSTATE_UINT16(iq_head, IntelIOMMUState),
        VMSTATE_UINT16(iq_tail, IntelIOMMUState),
        VMSTATE_UINT16(iq_size, IntelIOMMUState),
        VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
        VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
        VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
        VMSTATE_UNUSED(1),      /* bool root_extended is obsolete by VT-d */
        VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
        VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_eime, IntelIOMMUState),
        VMSTATE_END_OF_LIST()
    }
};
static const MemoryRegionOps vtd_mem_ops = {
    .read = vtd_mem_read,
    .write = vtd_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
static Property vtd_properties[] = {
    DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
    DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
    DEFINE_PROP_UINT8("aw-bits", IntelIOMMUState, aw_bits,
                      VTD_HOST_ADDRESS_WIDTH),
    DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
    DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE),
    DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true),
    DEFINE_PROP_END_OF_LIST(),
};
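/*
 * For SVT_ALL verification below, the SQ field of the IRTE selects how
 * much of the requester ID must match: vtd_svt_mask[0] = 0xffff
 * compares all 16 bits, while vtd_svt_mask[3] = 0xfff8 ignores the low
 * three bits of the function number.
 */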
/* Read IRTE entry with specific index */
static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
                        VTD_IR_TableEntry *entry, uint16_t sid)
{
    static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \
        {0xffff, 0xfffb, 0xfff9, 0xfff8};
    dma_addr_t addr = 0x00;
    uint16_t mask, source_id;
    uint8_t bus, bus_max, bus_min;

    if (index >= iommu->intr_size) {
        error_report_once("%s: index too large: ind=0x%x",
                          __func__, index);
        return -VTD_FR_IR_INDEX_OVER;
    }

    addr = iommu->intr_root + index * sizeof(*entry);
    if (dma_memory_read(&address_space_memory, addr, entry,
                        sizeof(*entry))) {
        error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64,
                          __func__, index, addr);
        return -VTD_FR_IR_ROOT_INVAL;
    }

    trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]),
                          le64_to_cpu(entry->data[0]));

    if (!entry->irte.present) {
        error_report_once("%s: detected non-present IRTE "
                          "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
                          __func__, index, le64_to_cpu(entry->data[1]),
                          le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_ENTRY_P;
    }

    if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
        entry->irte.__reserved_2) {
        error_report_once("%s: detected non-zero reserved IRTE "
                          "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
                          __func__, index, le64_to_cpu(entry->data[1]),
                          le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_IRTE_RSVD;
    }

    if (sid != X86_IOMMU_SID_INVALID) {
        /* Validate IRTE SID */
        source_id = le32_to_cpu(entry->irte.source_id);
        switch (entry->irte.sid_vtype) {
        case VTD_SVT_NONE:
            break;

        case VTD_SVT_ALL:
            mask = vtd_svt_mask[entry->irte.sid_q];
            if ((source_id & mask) != (sid & mask)) {
                error_report_once("%s: invalid IRTE SID "
                                  "(index=%u, sid=%u, source_id=%u)",
                                  __func__, index, sid, source_id);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        case VTD_SVT_BUS:
            bus_max = source_id >> 8;
            bus_min = source_id & 0xff;
            bus = sid >> 8;
            if (bus > bus_max || bus < bus_min) {
                error_report_once("%s: invalid SVT_BUS "
                                  "(index=%u, bus=%u, min=%u, max=%u)",
                                  __func__, index, bus, bus_min, bus_max);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        default:
            error_report_once("%s: detected invalid IRTE SVT "
                              "(index=%u, type=%d)", __func__,
                              index, entry->irte.sid_vtype);
            /* Take this as verification failure. */
            return -VTD_FR_IR_SID_ERR;
        }
    }

    return 0;
}
/* Fetch IRQ information of specific IR index */
static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
                             X86IOMMUIrq *irq, uint16_t sid)
{
    VTD_IR_TableEntry irte = {};
    int ret = 0;

    ret = vtd_irte_get(iommu, index, &irte, sid);
    if (ret) {
        return ret;
    }

    irq->trigger_mode = irte.irte.trigger_mode;
    irq->vector = irte.irte.vector;
    irq->delivery_mode = irte.irte.delivery_mode;
    irq->dest = le32_to_cpu(irte.irte.dest_id);
    if (!iommu->intr_eime) {
#define  VTD_IR_APIC_DEST_MASK         (0xff00ULL)
#define  VTD_IR_APIC_DEST_SHIFT        (8)
        irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
            VTD_IR_APIC_DEST_SHIFT;
    }
    irq->dest_mode = irte.irte.dest_mode;
    irq->redir_hint = irte.irte.redir_hint;

    trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
                       irq->delivery_mode, irq->dest, irq->dest_mode);

    return 0;
}
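/*
 * An MSI in the 0xFEExxxxx window is either in compatibility format
 * (passed through untranslated) or in remappable format; int_mode
 * distinguishes the two, and for remappable requests the IRTE index is
 * assembled from index_h/index_l, plus the subhandle for true MSIs.
 */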
/* Interrupt remapping for MSI/MSI-X entry */
static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
                                   MSIMessage *origin,
                                   MSIMessage *translated,
                                   uint16_t sid)
{
    int ret = 0;
    VTD_IR_MSIAddress addr;
    uint16_t index;
    X86IOMMUIrq irq = {};

    assert(origin && translated);

    trace_vtd_ir_remap_msi_req(origin->address, origin->data);

    if (!iommu || !iommu->intr_enabled) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    if (origin->address & VTD_MSI_ADDR_HI_MASK) {
        error_report_once("%s: MSI address high 32 bits non-zero detected: "
                          "address=0x%" PRIx64, __func__, origin->address);
        return -VTD_FR_IR_REQ_RSVD;
    }

    addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
    if (addr.addr.__head != 0xfee) {
        error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32,
                          __func__, addr.data);
        return -VTD_FR_IR_REQ_RSVD;
    }

    /* This is compatible mode. */
    if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);

#define  VTD_IR_MSI_DATA_SUBHANDLE       (0x0000ffff)
#define  VTD_IR_MSI_DATA_RESERVED        (0xffff0000)

    if (addr.addr.sub_valid) {
        /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
        index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
    }

    ret = vtd_remap_irq_get(iommu, index, &irq, sid);
    if (ret) {
        return ret;
    }

    if (addr.addr.sub_valid) {
        trace_vtd_ir_remap_type("MSI");
        if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
            error_report_once("%s: invalid IR MSI "
                              "(sid=%u, address=0x%" PRIx64
                              ", data=0x%" PRIx32 ")",
                              __func__, sid, origin->address, origin->data);
            return -VTD_FR_IR_REQ_RSVD;
        }
    } else {
        uint8_t vector = origin->data & 0xff;
        uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

        trace_vtd_ir_remap_type("IOAPIC");
        /* IOAPIC entry vector should be aligned with IRTE vector
         * (see vt-d spec 5.1.5.1). */
        if (vector != irq.vector) {
            trace_vtd_warn_ir_vector(sid, index, vector, irq.vector);
        }

        /* The Trigger Mode field must match the Trigger Mode in the IRTE.
         * (see vt-d spec 5.1.5.1). */
        if (trigger_mode != irq.trigger_mode) {
            trace_vtd_warn_ir_trigger(sid, index, trigger_mode,
                                      irq.trigger_mode);
        }
    }

    /*
     * We'd better keep the last two bits, assuming that the guest OS
     * might modify them. Keeping them does not hurt after all.
     */
    irq.msi_addr_last_bits = addr.addr.__not_care;

    /* Translate X86IOMMUIrq to MSI message */
    x86_iommu_irq_to_msi_message(&irq, translated);

out:
    trace_vtd_ir_remap_msi(origin->address, origin->data,
                           translated->address, translated->data);
    return 0;
}
static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
                         MSIMessage *dst, uint16_t sid)
{
    return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
                                   src, dst, sid);
}
static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
                                   uint64_t *data, unsigned size,
                                   MemTxAttrs attrs)
{
    return MEMTX_OK;
}

static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    int ret = 0;
    MSIMessage from = {}, to = {};
    uint16_t sid = X86_IOMMU_SID_INVALID;

    from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
    from.data = (uint32_t) value;

    if (!attrs.unspecified) {
        /* We have explicit Source ID */
        sid = attrs.requester_id;
    }

    ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
    if (ret) {
        /* TODO: report error */
        /* Drop this interrupt */
        return MEMTX_ERROR;
    }

    apic_get_class()->send_msi(&to);

    return MEMTX_OK;
}
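/*
 * These ops back the interrupt-address window starting at
 * VTD_INTERRUPT_ADDR_FIRST: each 32-bit write is rebuilt into an
 * MSIMessage above (offset -> address, value -> data), remapped, and
 * then injected through the APIC, while reads simply succeed with no
 * data.
 */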
static const MemoryRegionOps vtd_mem_ir_ops = {
    .read_with_attrs = vtd_mem_ir_read,
    .write_with_attrs = vtd_mem_ir_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
{
    uintptr_t key = (uintptr_t)bus;
    VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
    VTDAddressSpace *vtd_dev_as;
    char name[128];

    if (!vtd_bus) {
        uintptr_t *new_key = g_malloc(sizeof(*new_key));
        *new_key = (uintptr_t)bus;
        /* No corresponding free() */
        vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \
                            PCI_DEVFN_MAX);
        vtd_bus->bus = bus;
        g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];

    if (!vtd_dev_as) {
        snprintf(name, sizeof(name), "vtd-%02x.%x", PCI_SLOT(devfn),
                 PCI_FUNC(devfn));
        vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));

        vtd_dev_as->bus = bus;
        vtd_dev_as->devfn = (uint8_t)devfn;
        vtd_dev_as->iommu_state = s;
        vtd_dev_as->context_cache_entry.context_cache_gen = 0;
        vtd_dev_as->iova_tree = iova_tree_new();

        memory_region_init(&vtd_dev_as->root, OBJECT(s), name, UINT64_MAX);
        address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, "vtd-root");

        /*
         * Build the DMAR-disabled container with aliases to the
         * shared MRs. Note that aliasing to a shared memory region
         * helps the memory API detect identical FlatViews, so devices
         * can share the same FlatView when DMAR is disabled (either by
         * not providing "intel_iommu=on" or with "iommu=pt"). That
         * greatly reduces the total number of FlatViews in the system,
         * hence the VM runs faster.
         */
        memory_region_init_alias(&vtd_dev_as->nodmar, OBJECT(s),
                                 "vtd-nodmar", &s->mr_nodmar, 0,
                                 memory_region_size(&s->mr_nodmar));

        /*
         * Build the per-device DMAR-enabled container.
         *
         * TODO: currently we have per-device IOMMU memory region only
         * because we have per-device IOMMU notifiers for devices. If
         * one day we can abstract the IOMMU notifiers out of the
         * memory regions then we can also share the same memory
         * region here just like what we've done above with the nodmar
         * region.
         */
        strcat(name, "-dmar");
        memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu),
                                 TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s),
                                 name, UINT64_MAX);
        memory_region_init_alias(&vtd_dev_as->iommu_ir, OBJECT(s), "vtd-ir",
                                 &s->mr_ir, 0, memory_region_size(&s->mr_ir));
        memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->iommu),
                                            VTD_INTERRUPT_ADDR_FIRST,
                                            &vtd_dev_as->iommu_ir, 1);

        /*
         * Hook both the containers under the root container, we
         * switch between DMAR & noDMAR by enable/disable
         * corresponding sub-containers
         */
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            MEMORY_REGION(&vtd_dev_as->iommu),
                                            0);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->nodmar, 0);

        vtd_switch_address_space(vtd_dev_as);
    }
    return vtd_dev_as;
}
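/*
 * In the unmap path below, dma_aligned_pow2_mask() yields the largest
 * power-of-2 region that is aligned at 'start' and still fits within
 * [start, end]; e.g. unmapping [0x1000, 0x4fff] is notified as three
 * chunks: 4K at 0x1000, 8K at 0x2000 and 4K at 0x4000, because
 * notifiers only accept power-of-2 address masks.
 */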
/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
    hwaddr size, remain;
    hwaddr start = n->start;
    hwaddr end = n->end;
    IntelIOMMUState *s = as->iommu_state;
    DMAMap map;

    /*
     * Note: all the code in this function assumes that IOVA bits are
     * no more than VTD_MGAW bits (which is restricted by the VT-d
     * spec); otherwise we would need to consider 64-bit overflow.
     */

    if (end > VTD_ADDRESS_SIZE(s->aw_bits) - 1) {
        /*
         * Don't need to unmap regions that are bigger than the whole
         * VT-d supported address space size
         */
        end = VTD_ADDRESS_SIZE(s->aw_bits) - 1;
    }

    assert(start <= end);
    size = remain = end - start + 1;

    while (remain >= VTD_PAGE_SIZE) {
        IOMMUTLBEvent event;
        uint64_t mask = dma_aligned_pow2_mask(start, end, s->aw_bits);
        uint64_t size = mask + 1;

        assert(size);

        event.type = IOMMU_NOTIFIER_UNMAP;
        event.entry.iova = start;
        event.entry.addr_mask = mask;
        event.entry.target_as = &address_space_memory;
        event.entry.perm = IOMMU_NONE;
        /* This field is meaningless for unmap */
        event.entry.translated_addr = 0;

        memory_region_notify_iommu_one(n, &event);

        start += size;
        remain -= size;
    }

    assert(!remain);

    trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                             VTD_PCI_SLOT(as->devfn),
                             VTD_PCI_FUNC(as->devfn),
                             n->start, size);

    map.iova = n->start;
    map.size = size;
    iova_tree_remove(as->iova_tree, &map);
}
static void vtd_address_space_unmap_all(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    IOMMUNotifier *n;

    QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
        IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
            vtd_address_space_unmap(vtd_as, n);
        }
    }
}

static void vtd_address_space_refresh_all(IntelIOMMUState *s)
{
    vtd_address_space_unmap_all(s);
    vtd_switch_address_space_all(s);
}
static int vtd_replay_hook(IOMMUTLBEvent *event, void *private)
{
    memory_region_notify_iommu_one(private, event);
    return 0;
}

static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    uint8_t bus_n = pci_bus_num(vtd_as->bus);
    VTDContextEntry ce;

    /*
     * The replay can be triggered by either an invalidation or a newly
     * created entry. No matter what, we release existing mappings
     * (it means flushing caches for UNMAP-only registers).
     */
    vtd_address_space_unmap(vtd_as, n);

    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
        trace_vtd_replay_ce_valid(s->root_scalable ? "scalable mode" :
                                  "legacy mode",
                                  bus_n, PCI_SLOT(vtd_as->devfn),
                                  PCI_FUNC(vtd_as->devfn),
                                  vtd_get_domain_id(s, &ce),
                                  ce.hi, ce.lo);
        if (vtd_as_has_map_notifier(vtd_as)) {
            /* This is required only for MAP typed notifiers */
            vtd_page_walk_info info = {
                .hook_fn = vtd_replay_hook,
                .private = (void *)n,
                .notify_unmap = false,
                .aw = s->aw_bits,
                .as = vtd_as,
                .domain_id = vtd_get_domain_id(s, &ce),
            };

            vtd_page_walk(s, &ce, 0, ~0ULL, &info);
        }
    } else {
        trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
                                    PCI_FUNC(vtd_as->devfn));
    }

    return;
}
/* Do the initialization. It will also be called when reset, so pay
 * attention when adding new initialization stuff.
 */
static void vtd_init(IntelIOMMUState *s)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    memset(s->csr, 0, DMAR_REG_SIZE);
    memset(s->wmask, 0, DMAR_REG_SIZE);
    memset(s->w1cmask, 0, DMAR_REG_SIZE);
    memset(s->womask, 0, DMAR_REG_SIZE);

    s->root = 0;
    s->root_scalable = false;
    s->dmar_enabled = false;
    s->intr_enabled = false;
    s->iq_head = 0;
    s->iq_tail = 0;
    s->iq = 0;
    s->iq_size = 0;
    s->qi_enabled = false;
    s->iq_last_desc_type = VTD_INV_DESC_NONE;
    s->iq_dw = false;
    s->next_frcd_reg = 0;
    s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND |
             VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS |
             VTD_CAP_SAGAW_39bit | VTD_CAP_MGAW(s->aw_bits);
    if (s->dma_drain) {
        s->cap |= VTD_CAP_DRAIN;
    }
    if (s->aw_bits == VTD_HOST_AW_48BIT) {
        s->cap |= VTD_CAP_SAGAW_48bit;
    }
    s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;

    /*
     * Rsvd field masks for spte
     */
    vtd_spte_rsvd[0] = ~0ULL;
    vtd_spte_rsvd[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits,
                                                  x86_iommu->dt_supported);
    vtd_spte_rsvd[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits);
    vtd_spte_rsvd[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits);
    vtd_spte_rsvd[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits);

    vtd_spte_rsvd_large[2] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits,
                                                         x86_iommu->dt_supported);
    vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits,
                                                         x86_iommu->dt_supported);

    if (x86_iommu_ir_supported(x86_iommu)) {
        s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
        if (s->intr_eim == ON_OFF_AUTO_ON) {
            s->ecap |= VTD_ECAP_EIM;
        }
        assert(s->intr_eim != ON_OFF_AUTO_AUTO);
    }

    if (x86_iommu->dt_supported) {
        s->ecap |= VTD_ECAP_DT;
    }

    if (x86_iommu->pt_supported) {
        s->ecap |= VTD_ECAP_PT;
    }

    if (s->caching_mode) {
        s->cap |= VTD_CAP_CM;
    }

    /* TODO: read cap/ecap from host to decide which cap to be exposed. */
    if (s->scalable_mode) {
        s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS;
    }

    vtd_reset_caches(s);

    /* Define registers with default values and bit semantics */
    vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
    vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
    vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
    vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
    vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
    vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffffc00ULL, 0);
    vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
    vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);

    /* Advanced Fault Logging not supported */
    vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
    vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
    vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);

    /* Treated as RsvdZ when EIM in ECAP_REG is not supported
     * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
     */
    vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);

    /* Treated as RO for implementations that PLMR and PHMR fields reported
     * as Clear in the CAP_REG.
     * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
     */
    vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);

    vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
    vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff807ULL, 0);
    vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
    vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
    vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
    vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);

    /* IOTLB registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
    vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
    vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);

    /* Fault Recording Registers, 128-bit */
    vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
    vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);

    /*
     * Interrupt remapping registers.
     */
    vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
}
/* Should not reset address_spaces when reset because devices will still use
 * the address space they got at first (won't ask the bus again).
 */
static void vtd_reset(DeviceState *dev)
{
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);

    vtd_init(s);
    vtd_address_space_refresh_all(s);
}
static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    VTDAddressSpace *vtd_as;

    assert(0 <= devfn && devfn < PCI_DEVFN_MAX);

    vtd_as = vtd_find_add_as(s, bus, devfn);
    return &vtd_as->as;
}
static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu_ir_supported(x86_iommu)) {
        error_setg(errp, "eim=on cannot be selected without intremap=on");
        return false;
    }

    if (s->intr_eim == ON_OFF_AUTO_AUTO) {
        s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
                      && x86_iommu_ir_supported(x86_iommu) ?
                                              ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
        if (!kvm_irqchip_in_kernel()) {
            error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
            return false;
        }
        if (!kvm_enable_x2apic()) {
            error_setg(errp, "eim=on requires support on the KVM side "
                             "(X2APIC_API, first shipped in v4.7)");
            return false;
        }
    }

    /* Currently only address widths supported are 39 and 48 bits */
    if ((s->aw_bits != VTD_HOST_AW_39BIT) &&
        (s->aw_bits != VTD_HOST_AW_48BIT)) {
        error_setg(errp, "Supported values for aw-bits are: %d, %d",
                   VTD_HOST_AW_39BIT, VTD_HOST_AW_48BIT);
        return false;
    }

    if (s->scalable_mode && !s->dma_drain) {
        error_setg(errp, "Need to set dma_drain for scalable mode");
        return false;
    }

    return true;
}
static int vtd_machine_done_notify_one(Object *child, void *unused)
{
    IntelIOMMUState *iommu = INTEL_IOMMU_DEVICE(x86_iommu_get_default());

    /*
     * We hard-code "vfio-pci" here because it is the only special case
     * we need so far. Let's be more elegant in the future when we can,
     * but so far there seems to be no better way.
     */
    if (object_dynamic_cast(child, "vfio-pci") && !iommu->caching_mode) {
        vtd_panic_require_caching_mode();
    }

    return 0;
}

static void vtd_machine_done_hook(Notifier *notifier, void *unused)
{
    object_child_foreach_recursive(object_get_root(),
                                   vtd_machine_done_notify_one, NULL);
}

static Notifier vtd_machine_done_notify = {
    .notify = vtd_machine_done_hook,
};
static void vtd_realize(DeviceState *dev, Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    PCMachineState *pcms = PC_MACHINE(ms);
    X86MachineState *x86ms = X86_MACHINE(ms);
    PCIBus *bus = pcms->bus;
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);

    x86_iommu->type = TYPE_INTEL;

    if (!vtd_decide_config(s, errp)) {
        return;
    }

    QLIST_INIT(&s->vtd_as_with_notifiers);
    qemu_mutex_init(&s->iommu_lock);
    memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
    memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                          "intel_iommu", DMAR_REG_SIZE);

    /* Create the shared memory regions by all devices */
    memory_region_init(&s->mr_nodmar, OBJECT(s), "vtd-nodmar",
                       UINT64_MAX);
    memory_region_init_io(&s->mr_ir, OBJECT(s), &vtd_mem_ir_ops,
                          s, "vtd-ir", VTD_INTERRUPT_ADDR_SIZE);
    memory_region_init_alias(&s->mr_sys_alias, OBJECT(s),
                             "vtd-sys-alias", get_system_memory(), 0,
                             memory_region_size(get_system_memory()));
    memory_region_add_subregion_overlap(&s->mr_nodmar, 0,
                                        &s->mr_sys_alias, 0);
    memory_region_add_subregion_overlap(&s->mr_nodmar,
                                        VTD_INTERRUPT_ADDR_FIRST,
                                        &s->mr_ir, 1);

    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
    /* No corresponding destroy */
    s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                     g_free, g_free);
    s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                                g_free, g_free);
    vtd_init(s);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
    pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
    /* Pseudo address space under root PCI bus. */
    x86ms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
    qemu_add_machine_init_done_notifier(&vtd_machine_done_notify);
}
static void vtd_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *x86_class = X86_IOMMU_DEVICE_CLASS(klass);

    dc->reset = vtd_reset;
    dc->vmsd = &vtd_vmstate;
    device_class_set_props(dc, vtd_properties);
    dc->hotpluggable = false;
    x86_class->realize = vtd_realize;
    x86_class->int_remap = vtd_int_remap;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->desc = "Intel IOMMU (VT-d) DMA Remapping device";
}
static const TypeInfo vtd_info = {
    .name = TYPE_INTEL_IOMMU_DEVICE,
    .parent = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(IntelIOMMUState),
    .class_init = vtd_class_init,
};
static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
                                               void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = vtd_iommu_translate;
    imrc->notify_flag_changed = vtd_iommu_notify_flag_changed;
    imrc->replay = vtd_iommu_replay;
}

static const TypeInfo vtd_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_INTEL_IOMMU_MEMORY_REGION,
    .class_init = vtd_iommu_memory_region_class_init,
};
static void vtd_register_types(void)
{
    type_register_static(&vtd_info);
    type_register_static(&vtd_iommu_memory_region_info);
}

type_init(vtd_register_types)