/*
 * QEMU emulation of AMD IOMMU (AMD-Vi)
 *
 * Copyright (C) 2011 Eduard - Gabriel Munteanu
 * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Cache implementation inspired by hw/i386/intel_iommu.c
 */
#include "qemu/osdep.h"
#include "hw/i386/amd_iommu.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"
/* used AMD-Vi MMIO registers */
const char *amdvi_mmio_low[] = {
    "AMDVI_MMIO_DEVTAB_BASE",
    "AMDVI_MMIO_CMDBUF_BASE",
    "AMDVI_MMIO_EVTLOG_BASE",
    "AMDVI_MMIO_CONTROL",
    "AMDVI_MMIO_EXCL_BASE",
    "AMDVI_MMIO_EXCL_LIMIT",
    "AMDVI_MMIO_EXT_FEATURES",
    "AMDVI_MMIO_PPR_BASE",
    "UNHANDLED"
};
const char *amdvi_mmio_high[] = {
    "AMDVI_MMIO_COMMAND_HEAD",
    "AMDVI_MMIO_COMMAND_TAIL",
    "AMDVI_MMIO_EVTLOG_HEAD",
    "AMDVI_MMIO_EVTLOG_TAIL",
    "AMDVI_MMIO_STATUS",
    "AMDVI_MMIO_PPR_HEAD",
    "AMDVI_MMIO_PPR_TAIL",
    "UNHANDLED"
};
struct AMDVIAddressSpace {
    uint8_t bus_num;            /* bus number                           */
    uint8_t devfn;              /* device function                      */
    AMDVIState *iommu_state;    /* AMDVI - one per machine              */
    IOMMUMemoryRegion iommu;    /* Device's address translation region  */
    MemoryRegion iommu_ir;      /* Device's interrupt remapping region  */
    AddressSpace as;            /* device's corresponding address space */
};
/* AMDVI cache entry */
typedef struct AMDVIIOTLBEntry {
    uint16_t domid;             /* assigned domain id  */
    uint16_t devid;             /* device owning entry */
    uint64_t perms;             /* access permissions  */
    uint64_t translated_addr;   /* translated address  */
    uint64_t page_mask;         /* physical page size  */
} AMDVIIOTLBEntry;
/* configure MMIO registers at startup/reset */
static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val,
                           uint64_t romask, uint64_t w1cmask)
{
    stq_le_p(&s->mmior[addr], val);
    stq_le_p(&s->romask[addr], romask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}
static uint16_t amdvi_readw(AMDVIState *s, hwaddr addr)
{
    return lduw_le_p(&s->mmior[addr]);
}

static uint32_t amdvi_readl(AMDVIState *s, hwaddr addr)
{
    return ldl_le_p(&s->mmior[addr]);
}

static uint64_t amdvi_readq(AMDVIState *s, hwaddr addr)
{
    return ldq_le_p(&s->mmior[addr]);
}
/* internal write, bypassing the read-only and write-1-to-clear masks */
static void amdvi_writeq_raw(AMDVIState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->mmior[addr], val);
}
/* external (guest-visible) write, honouring the register masks */
static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
{
    uint16_t romask = lduw_le_p(&s->romask[addr]);
    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
    stw_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}
static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
{
    uint32_t romask = ldl_le_p(&s->romask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
    stl_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}
static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    uint64_t romask = ldq_le_p(&s->romask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    uint64_t oldval = ldq_le_p(&s->mmior[addr]);
    stq_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}
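/*
 * Illustration of the masked-write rule above (added commentary, not part
 * of the original flow): an update computes
 *     new = ((old & romask) | (val & ~romask)) & ~(val & w1cmask)
 * i.e. read-only bits keep their old value, writable bits take the new
 * value, and write-1-to-clear bits are cleared when the guest writes 1.
 * For example, with old = 0xa5, romask = 0xf0 and w1cmask = 0x0f, writing
 * val = 0xff yields (0xa0 | 0x0f) & ~0x0f = 0xa0: the read-only high
 * nibble survives and the w1c low nibble is cleared.
 */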
/* test whether the given mask bits are set in a 64-bit register */
static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val)
{
    return amdvi_readq(s, addr) & val;
}
/* OR a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_orq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) | val);
}
/* AND a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_andq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) & val);
}
static void amdvi_generate_msi_interrupt(AMDVIState *s)
{
    MSIMessage msg = {};
    MemTxAttrs attrs = {
        .requester_id = pci_requester_id(&s->pci.dev)
    };

    if (msi_enabled(&s->pci.dev)) {
        msg = msi_get_message(&s->pci.dev, 0);
        address_space_stl_le(&address_space_memory, msg.address, msg.data,
                             attrs, NULL);
    }
}
static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
{
    /* event logging not enabled */
    if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS,
        AMDVI_MMIO_STATUS_EVT_OVF)) {
        return;
    }

    /* event log buffer full */
    if (s->evtlog_tail >= s->evtlog_len) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
        return;
    }

    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
        evt, AMDVI_EVENT_LEN)) {
        trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
    }

    s->evtlog_tail += AMDVI_EVENT_LEN;
    amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
    amdvi_generate_msi_interrupt(s);
}
static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start,
                                int length)
{
    int index = start / 64, bitpos = start % 64;
    uint64_t mask = MAKE_64BIT_MASK(start, length);
    buffer[index] &= ~mask;
    buffer[index] |= (value << bitpos) & mask;
}
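/*
 * Sketch of the encoding above (added for illustration): a call such as
 * amdvi_setevent_bits(evt, info, 55, 8) selects evt[55 / 64] == evt[0],
 * builds MAKE_64BIT_MASK(55, 8) covering bits 55..62, and replaces those
 * bits with the low eight bits of info shifted to bit position 55.
 */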
/*
 * AMDVi event structure
 *    0:15   -> DeviceID
 *    55:63  -> event type + miscellaneous info
 *    63:127 -> related address
 */
static void amdvi_encode_event(uint64_t *evt, uint16_t devid, uint64_t addr,
                               uint16_t info)
{
    amdvi_setevent_bits(evt, devid, 0, 16);
    amdvi_setevent_bits(evt, info, 55, 8);
    amdvi_setevent_bits(evt, addr, 63, 64);
}
/* log an error encountered during a page walk
 *
 * @addr: virtual address in translation request
 */
static void amdvi_page_fault(AMDVIState *s, uint16_t devid,
                             hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
/*
 * log a master abort accessing device table
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid,
                                   hwaddr devtab, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_DEV_TAB_HW_ERROR;

    amdvi_encode_event(evt, devid, devtab, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
/* log an event trying to access command buffer
 *  @addr : address that couldn't be accessed
 */
static void amdvi_log_command_error(AMDVIState *s, hwaddr addr)
{
    uint64_t evt[4], info = AMDVI_EVENT_COMMAND_HW_ERROR;

    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
/* log an illegal command event
 *  @addr : address of illegal command
 */
static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
                                       hwaddr addr)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_COMMAND_ERROR;
    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
}
/* log an error accessing device table
 *
 *  @devid : device owning the table entry
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid,
                                          hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
}
/* log an error accessing a PTE entry
 *  @addr : address that couldn't be accessed
 */
static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid,
                                    hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
static gboolean amdvi_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint amdvi_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}
static AMDVIIOTLBEntry *amdvi_iotlb_lookup(AMDVIState *s, hwaddr addr,
                                           uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    return g_hash_table_lookup(s->iotlb, &key);
}
static void amdvi_iotlb_reset(AMDVIState *s)
{
    assert(s->iotlb);
    trace_amdvi_iotlb_reset();
    g_hash_table_remove_all(s->iotlb);
}
static gboolean amdvi_iotlb_remove_by_devid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t devid = *(uint16_t *)user_data;
    return entry->devid == devid;
}
static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr,
                                    uint16_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    g_hash_table_remove(s->iotlb, &key);
}
static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
                               uint64_t gpa, IOMMUTLBEntry to_cache,
                               uint16_t domid)
{
    AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
    uint64_t *key = g_new(uint64_t, 1);
    uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;

    /* don't cache erroneous translations */
    if (to_cache.perm != IOMMU_NONE) {
        trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid),
                PCI_FUNC(devid), gpa, to_cache.translated_addr);

        if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) {
            amdvi_iotlb_reset(s);
        }

        entry->domid = domid;
        entry->perms = to_cache.perm;
        entry->translated_addr = to_cache.translated_addr;
        entry->page_mask = to_cache.addr_mask;
        *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
        g_hash_table_replace(s->iotlb, key, entry);
    }
}
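/*
 * For reference (added commentary): the IOTLB key packs the 4K guest frame
 * number together with the requester ID,
 *     key = (gpa >> AMDVI_PAGE_SHIFT_4K) | ((uint64_t)devid << AMDVI_DEVID_SHIFT),
 * so one g_hash_table lookup matches both the page and the device, while
 * per-device and per-domain invalidation walk the table with the
 * remove-by callbacks above instead.
 */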
static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
{
    /* pad the last 3 bits */
    hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3;
    uint64_t data = cpu_to_le64(cmd[1]);

    if (extract64(cmd[0], 51, 8)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    if (extract64(cmd[0], 0, 1)) {
        if (dma_memory_write(&address_space_memory, addr, &data,
            AMDVI_COMPLETION_DATA_SIZE)) {
            trace_amdvi_completion_wait_fail(addr);
        }
    }
    /* set completion interrupt */
    if (extract64(cmd[0], 1, 1)) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
    }
    trace_amdvi_completion_wait(addr, data);
}
/* log the error without aborting, since Linux appears to use reserved bits */
static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16));

    /* This command should invalidate internal caches, of which there are none */
    if (extract64(cmd[0], 15, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_devtab_inval(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                             PCI_FUNC(devid));
}
static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 15, 16) || extract64(cmd[0], 19, 8) ||
        extract64(cmd[1], 0, 2) || extract64(cmd[1], 3, 29)
        || extract64(cmd[1], 47, 16)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_ppr_exec();
}
static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 0, 60) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    amdvi_iotlb_reset(s);
    trace_amdvi_all_inval();
}
static gboolean amdvi_iotlb_remove_by_domid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t domid = *(uint16_t *)user_data;
    return entry->domid == domid;
}
/* we don't have devid - we can't remove pages by address */
static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
{
    uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16));

    if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 16, 12) ||
        extract64(cmd[0], 3, 10)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid,
                                &domid);
    trace_amdvi_pages_inval(domid);
}
static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 20, 8) ||
        extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
        extract64(cmd[1], 5, 7)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_prefetch_pages();
}
static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    trace_amdvi_intr_inval();
}
/* FIXME: Try to work with the specified size instead of all the pages
 * when the S bit is on
 */
static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = extract64(cmd[0], 0, 16);

    if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 9)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    if (extract64(cmd[1], 0, 1)) {
        g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_devid,
                                    &devid);
    } else {
        amdvi_iotlb_remove_page(s, cpu_to_le64(extract64(cmd[1], 12, 52)) << 12,
                                cpu_to_le16(extract64(cmd[1], 0, 16)));
    }
    trace_amdvi_iotlb_inval();
}
/* not honouring reserved bits is regarded as an illegal command */
static void amdvi_cmdbuf_exec(AMDVIState *s)
{
    uint64_t cmd[2];

    if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head,
        cmd, AMDVI_COMMAND_SIZE)) {
        trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head);
        amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
        return;
    }

    switch (extract64(cmd[0], 60, 4)) {
    case AMDVI_CMD_COMPLETION_WAIT:
        amdvi_completion_wait(s, cmd);
        break;
    case AMDVI_CMD_INVAL_DEVTAB_ENTRY:
        amdvi_inval_devtab_entry(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_PAGES:
        amdvi_inval_pages(s, cmd);
        break;
    case AMDVI_CMD_INVAL_IOTLB_PAGES:
        iommu_inval_iotlb(s, cmd);
        break;
    case AMDVI_CMD_INVAL_INTR_TABLE:
        amdvi_inval_inttable(s, cmd);
        break;
    case AMDVI_CMD_PREFETCH_AMDVI_PAGES:
        amdvi_prefetch_pages(s, cmd);
        break;
    case AMDVI_CMD_COMPLETE_PPR_REQUEST:
        amdvi_complete_ppr(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_ALL:
        amdvi_inval_all(s, cmd);
        break;
    default:
        trace_amdvi_unhandled_command(extract64(cmd[0], 60, 4));
        /* log illegal command */
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
}
static void amdvi_cmdbuf_run(AMDVIState *s)
{
    if (!s->cmdbuf_enabled) {
        trace_amdvi_command_error(amdvi_readq(s, AMDVI_MMIO_CONTROL));
        return;
    }

    /* check if there is work to do. */
    while (s->cmdbuf_head != s->cmdbuf_tail) {
        trace_amdvi_command_exec(s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
        amdvi_cmdbuf_exec(s);
        s->cmdbuf_head += AMDVI_COMMAND_SIZE;
        amdvi_writeq_raw(s, AMDVI_MMIO_COMMAND_HEAD, s->cmdbuf_head);

        /* wrap head pointer */
        if (s->cmdbuf_head >= s->cmdbuf_len * AMDVI_COMMAND_SIZE) {
            s->cmdbuf_head = 0;
        }
    }
}
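/*
 * Added commentary: the command buffer behaves as a producer/consumer ring.
 * The guest queues 128-bit commands and bumps AMDVI_MMIO_COMMAND_TAIL; the
 * loop above consumes one AMDVI_COMMAND_SIZE (16-byte) entry at a time,
 * mirrors the new head back into the MMIO register, and wraps head to zero
 * once it reaches cmdbuf_len * AMDVI_COMMAND_SIZE.
 */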
static void amdvi_mmio_trace(hwaddr addr, unsigned size)
{
    uint8_t index = (addr & ~0x2000) / 8;

    if ((addr & 0x2000)) {
        /* high table */
        index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index;
        trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
    } else {
        index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index;
        trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
    }
}
static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    AMDVIState *s = opaque;

    uint64_t val = -1;
    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_read_invalid(AMDVI_MMIO_SIZE, addr, size);
        return (uint64_t)-1;
    }

    if (size == 2) {
        val = amdvi_readw(s, addr);
    } else if (size == 4) {
        val = amdvi_readl(s, addr);
    } else if (size == 8) {
        val = amdvi_readq(s, addr);
    }
    amdvi_mmio_trace(addr, size);

    return val;
}
static void amdvi_handle_control_write(AMDVIState *s)
{
    unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL);
    s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN);

    s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN);
    s->evtlog_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_EVENTLOGEN);

    s->evtlog_intr = !!(control & AMDVI_MMIO_CONTROL_EVENTINTEN);
    s->completion_wait_intr = !!(control & AMDVI_MMIO_CONTROL_COMWAITINTEN);
    s->cmdbuf_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_CMDBUFLEN);

    /* update the flags depending on the control register */
    if (s->cmdbuf_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_CMDBUF_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_CMDBUF_RUN);
    }
    if (s->evtlog_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_EVT_RUN);
    }

    trace_amdvi_control_status(control);
    amdvi_cmdbuf_run(s);
}
static inline void amdvi_handle_devtab_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_DEVICE_TABLE);
    s->devtab = (val & AMDVI_MMIO_DEVTAB_BASE_MASK);

    /* set device table length */
    s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1) *
                    (AMDVI_MMIO_DEVTAB_SIZE_UNIT /
                     AMDVI_MMIO_DEVTAB_ENTRY_SIZE);
}
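/*
 * Worked example (assuming the usual 4KB size unit and 32-byte entries
 * defined in amd_iommu.h): a size field of 0 selects one 4KB page, i.e.
 * (0 + 1) * (4096 / 32) = 128 device table entries.
 */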
static inline void amdvi_handle_cmdhead_write(AMDVIState *s)
{
    s->cmdbuf_head = amdvi_readq(s, AMDVI_MMIO_COMMAND_HEAD)
                     & AMDVI_MMIO_CMDBUF_HEAD_MASK;
    amdvi_cmdbuf_run(s);
}
static inline void amdvi_handle_cmdbase_write(AMDVIState *s)
{
    s->cmdbuf = amdvi_readq(s, AMDVI_MMIO_COMMAND_BASE)
                & AMDVI_MMIO_CMDBUF_BASE_MASK;
    s->cmdbuf_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_CMDBUF_SIZE_BYTE)
                    & AMDVI_MMIO_CMDBUF_SIZE_MASK);
    s->cmdbuf_head = s->cmdbuf_tail = 0;
}
static inline void amdvi_handle_cmdtail_write(AMDVIState *s)
{
    s->cmdbuf_tail = amdvi_readq(s, AMDVI_MMIO_COMMAND_TAIL)
                     & AMDVI_MMIO_CMDBUF_TAIL_MASK;
    amdvi_cmdbuf_run(s);
}
static inline void amdvi_handle_excllim_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EXCL_LIMIT);
    s->excl_limit = (val & AMDVI_MMIO_EXCL_LIMIT_MASK) |
                    AMDVI_MMIO_EXCL_LIMIT_LOW;
}
static inline void amdvi_handle_evtbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE);
    s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK;
    s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE)
                    & AMDVI_MMIO_EVTLOG_SIZE_MASK);
}
static inline void amdvi_handle_evttail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_TAIL);
    s->evtlog_tail = val & AMDVI_MMIO_EVTLOG_TAIL_MASK;
}
static inline void amdvi_handle_evthead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_HEAD);
    s->evtlog_head = val & AMDVI_MMIO_EVTLOG_HEAD_MASK;
}
static inline void amdvi_handle_pprbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_BASE);
    s->ppr_log = val & AMDVI_MMIO_PPRLOG_BASE_MASK;
    s->pprlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_PPRLOG_SIZE_BYTE)
                    & AMDVI_MMIO_PPRLOG_SIZE_MASK);
}
static inline void amdvi_handle_pprhead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_HEAD);
    s->pprlog_head = val & AMDVI_MMIO_PPRLOG_HEAD_MASK;
}
static inline void amdvi_handle_pprtail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_TAIL);
    s->pprlog_tail = val & AMDVI_MMIO_PPRLOG_TAIL_MASK;
}
/* FIXME: something might go wrong if System Software writes in chunks
 * of one byte. Linux writes in chunks of 4 bytes, so currently this
 * works correctly with Linux but will definitely be busted if software
 * reads/writes 8 bytes
 */
static void amdvi_mmio_reg_write(AMDVIState *s, unsigned size, uint64_t val,
                                 hwaddr addr)
{
    if (size == 2) {
        amdvi_writew(s, addr, val);
    } else if (size == 4) {
        amdvi_writel(s, addr, val);
    } else if (size == 8) {
        amdvi_writeq(s, addr, val);
    }
}
static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                             unsigned size)
{
    AMDVIState *s = opaque;
    unsigned long offset = addr & 0x07;

    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_write("error: addr outside region: max ",
                (uint64_t)AMDVI_MMIO_SIZE, size, val, offset);
        return;
    }

    amdvi_mmio_trace(addr, size);
    switch (addr & ~0x07) {
    case AMDVI_MMIO_CONTROL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_control_write(s);
        break;
    case AMDVI_MMIO_DEVICE_TABLE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* set device table address
         * This also suffers from inability to tell whether software
         * is done writing
         */
        if (offset || (size == 8)) {
            amdvi_handle_devtab_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdhead_write(s);
        break;
    case AMDVI_MMIO_COMMAND_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* FIXME - make sure System Software has finished writing in case
         * it writes in chunks less than 8 bytes in a robust way. As for
         * now, this hack works for the Linux driver
         */
        if (offset || (size == 8)) {
            amdvi_handle_cmdbase_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdtail_write(s);
        break;
    case AMDVI_MMIO_EVENT_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evtbase_write(s);
        break;
    case AMDVI_MMIO_EVENT_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evthead_write(s);
        break;
    case AMDVI_MMIO_EVENT_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evttail_write(s);
        break;
    case AMDVI_MMIO_EXCL_LIMIT:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_excllim_write(s);
        break;
    /* PPR log base - unused for now */
    case AMDVI_MMIO_PPR_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprbase_write(s);
        break;
    /* PPR log head - also unused for now */
    case AMDVI_MMIO_PPR_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprhead_write(s);
        break;
    /* PPR log tail - unused for now */
    case AMDVI_MMIO_PPR_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprtail_write(s);
        break;
    }
}
static inline uint64_t amdvi_get_perms(uint64_t entry)
{
    return (entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE)) >>
           AMDVI_DEV_PERM_SHIFT;
}
/* a valid entry should have V = 1 and reserved bits honoured */
static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid,
                               uint64_t *dte)
{
    if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED)
        || (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED)
        || (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
        amdvi_log_illegaldevtab_error(s, devid,
                                      s->devtab +
                                      devid * AMDVI_DEVTAB_ENTRY_SIZE, 0);
        return false;
    }

    return dte[0] & AMDVI_DEV_VALID;
}
/* get a device table entry given the devid */
static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry)
{
    uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE;

    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
        AMDVI_DEVTAB_ENTRY_SIZE)) {
        trace_amdvi_dte_get_fail(s->devtab, offset);
        /* log error accessing dte */
        amdvi_log_devtab_error(s, devid, s->devtab + offset, 0);
        return false;
    }

    *entry = le64_to_cpu(*entry);
    if (!amdvi_validate_dte(s, devid, entry)) {
        trace_amdvi_invalid_dte(entry[0]);
        return false;
    }

    return true;
}
/* get pte translation mode */
static inline uint8_t get_pte_translation_mode(uint64_t pte)
{
    return (pte >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK;
}
static inline uint64_t pte_override_page_mask(uint64_t pte)
{
    uint8_t page_mask = 13;
    uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) >> 12;
    /* find the first zero bit */
    while (addr & 1) {
        page_mask++;
        addr = addr >> 1;
    }

    return ~((1ULL << page_mask) - 1);
}
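/*
 * Worked example (added for illustration): per the AMD-Vi convention, a PTE
 * whose next-level field is 7 encodes its page size via the lowest zero bit
 * of the address field. A 2MB page stores ones in address bits 12..19 and a
 * zero in bit 20, so the loop above counts eight set bits, page_mask
 * reaches 21, and the returned mask clears the low 21 bits.
 */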
static inline uint64_t pte_get_page_mask(uint64_t oldlevel)
{
    return ~((1ULL << ((oldlevel * 9) + 3)) - 1);
}
static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr,
                                           uint16_t devid)
{
    uint64_t pte;

    if (dma_memory_read(&address_space_memory, pte_addr, &pte, sizeof(pte))) {
        trace_amdvi_get_pte_hwerror(pte_addr);
        amdvi_log_pagetab_error(s, devid, pte_addr, 0);
        pte = 0;
        return pte;
    }

    pte = le64_to_cpu(pte);
    return pte;
}
static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte,
                            IOMMUTLBEntry *ret, unsigned perms,
                            hwaddr addr)
{
    unsigned level, present, pte_perms, oldlevel;
    uint64_t pte = dte[0], pte_addr, page_mask;

    /* make sure the DTE has TV = 1 */
    if (pte & AMDVI_DEV_TRANSLATION_VALID) {
        level = get_pte_translation_mode(pte);
        if (level >= 7) {
            trace_amdvi_mode_invalid(level, addr);
            return;
        }
        if (level == 0) {
            goto no_remap;
        }

        /* we are at the leaf page table or page table encodes a huge page */
        while (level > 0) {
            pte_perms = amdvi_get_perms(pte);
            present = pte & 1;
            if (!present || perms != (perms & pte_perms)) {
                amdvi_page_fault(as->iommu_state, as->devfn, addr, perms);
                trace_amdvi_page_fault(addr);
                return;
            }

            /* go to the next lower level */
            pte_addr = pte & AMDVI_DEV_PT_ROOT_MASK;
            /* add offset and load pte */
            pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
            pte = amdvi_get_pte_entry(as->iommu_state, pte_addr, as->devfn);
            if (!pte) {
                return;
            }
            oldlevel = level;
            level = get_pte_translation_mode(pte);
            if (level == 0x7) {
                break;
            }
        }

        if (level == 0x7) {
            page_mask = pte_override_page_mask(pte);
        } else {
            page_mask = pte_get_page_mask(oldlevel);
        }

        /* get access permissions from pte */
        ret->iova = addr & page_mask;
        ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & page_mask;
        ret->addr_mask = ~page_mask;
        ret->perm = amdvi_get_perms(pte);
        return;
    }
no_remap:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = amdvi_get_perms(pte);
}
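/*
 * Added commentary on the walk above: each level consumes 9 bits of the IO
 * virtual address. At level L the table index is
 * (addr >> (3 + 9 * L)) & 0x1FF, shifted left by 3 to form the byte offset
 * of an 8-byte PTE; e.g. at level 1 the index bits are addr[20:12], which
 * matches x86-style 512-entry page tables.
 */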
static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr,
                               bool is_write, IOMMUTLBEntry *ret)
{
    AMDVIState *s = as->iommu_state;
    uint16_t devid = PCI_BUILD_BDF(as->bus_num, as->devfn);
    AMDVIIOTLBEntry *iotlb_entry = amdvi_iotlb_lookup(s, addr, devid);
    uint64_t entry[4];

    if (iotlb_entry) {
        trace_amdvi_iotlb_hit(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
        ret->iova = addr & ~iotlb_entry->page_mask;
        ret->translated_addr = iotlb_entry->translated_addr;
        ret->addr_mask = iotlb_entry->page_mask;
        ret->perm = iotlb_entry->perms;
        return;
    }

    /* devices with V = 0 are not translated */
    if (!amdvi_get_dte(s, devid, entry)) {
        goto out;
    }

    amdvi_page_walk(as, entry, ret,
                    is_write ? AMDVI_PERM_WRITE : AMDVI_PERM_READ, addr);

    amdvi_update_iotlb(s, devid, addr, *ret,
                       entry[1] & AMDVI_DEV_DOMID_ID_MASK);
    return;

out:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = IOMMU_RW;
}
static inline bool amdvi_is_interrupt_addr(hwaddr addr)
{
    return addr >= AMDVI_INT_ADDR_FIRST && addr <= AMDVI_INT_ADDR_LAST;
}
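/*
 * Added note: AMDVI_INT_ADDR_FIRST/LAST are expected to cover the
 * conventional x86 interrupt window at 0xFEE00000-0xFEEFFFFF; DMA to this
 * range is an MSI write rather than a memory access, which is why
 * amdvi_translate() below maps it write-only instead of walking the
 * page tables.
 */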
static IOMMUTLBEntry amdvi_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                     IOMMUAccessFlags flag)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
    AMDVIState *s = as->iommu_state;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE
    };

    if (!s->enabled) {
        /* AMDVI disabled - corresponds to iommu=off not
         * failure to provide any parameter
         */
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_RW;
        return ret;
    } else if (amdvi_is_interrupt_addr(addr)) {
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_WO;
        return ret;
    }

    amdvi_do_translate(as, addr, flag & IOMMU_WO, &ret);
    trace_amdvi_translation_result(as->bus_num, PCI_SLOT(as->devfn),
            PCI_FUNC(as->devfn), addr, ret.translated_addr);
    return ret;
}
static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    AMDVIState *s = opaque;
    AMDVIAddressSpace **iommu_as;
    int bus_num = pci_bus_num(bus);

    iommu_as = s->address_spaces[bus_num];

    /* allocate memory during the first run */
    if (!iommu_as) {
        iommu_as = g_malloc0(sizeof(AMDVIAddressSpace *) * PCI_DEVFN_MAX);
        s->address_spaces[bus_num] = iommu_as;
    }

    /* set up AMD-Vi region */
    if (!iommu_as[devfn]) {
        iommu_as[devfn] = g_malloc0(sizeof(AMDVIAddressSpace));
        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
        iommu_as[devfn]->devfn = (uint8_t)devfn;
        iommu_as[devfn]->iommu_state = s;

        memory_region_init_iommu(&iommu_as[devfn]->iommu,
                                 sizeof(iommu_as[devfn]->iommu),
                                 TYPE_AMD_IOMMU_MEMORY_REGION,
                                 OBJECT(s),
                                 "amd-iommu", UINT64_MAX);
        address_space_init(&iommu_as[devfn]->as,
                           MEMORY_REGION(&iommu_as[devfn]->iommu),
                           "amd-iommu");
    }
    return &iommu_as[devfn]->as;
}
static const MemoryRegionOps mmio_mem_ops = {
    .read = amdvi_mmio_read,
    .write = amdvi_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    }
};
static void amdvi_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                            IOMMUNotifierFlag old,
                                            IOMMUNotifierFlag new)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);

    if (new & IOMMU_NOTIFIER_MAP) {
        error_report("device %02x.%02x.%x requires iommu notifier which is not "
                     "currently supported", as->bus_num, PCI_SLOT(as->devfn),
                     PCI_FUNC(as->devfn));
        exit(1);
    }
}
static void amdvi_init(AMDVIState *s)
{
    amdvi_iotlb_reset(s);

    s->devtab_len = 0;
    s->cmdbuf_len = 0;
    s->cmdbuf_head = 0;
    s->cmdbuf_tail = 0;
    s->evtlog_head = 0;
    s->evtlog_tail = 0;
    s->excl_enabled = false;
    s->excl_allow = false;
    s->mmio_enabled = false;
    s->enabled = false;
    s->ats_enabled = false;
    s->cmdbuf_enabled = false;

    /* reset MMIO */
    memset(s->mmior, 0, AMDVI_MMIO_SIZE);
    amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES,
            0xffffffffffffffef, 0);
    amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67);

    /* reset device ident */
    pci_config_set_vendor_id(s->pci.dev.config, PCI_VENDOR_ID_AMD);
    pci_config_set_prog_interface(s->pci.dev.config, 0);
    pci_config_set_device_id(s->pci.dev.config, s->devid);
    pci_config_set_class(s->pci.dev.config, 0x0806);

    /* reset AMDVI specific capabilities, all r/o */
    pci_set_long(s->pci.dev.config + s->capab_offset, AMDVI_CAPAB_FEATURES);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_LOW,
                 s->mmio.addr & ~(0xffff0000));
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH,
                (s->mmio.addr & ~(0xffff)) >> 16);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_RANGE,
                 0xff000000);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, 0);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC,
            AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR);
}
static void amdvi_reset(DeviceState *dev)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);

    msi_reset(&s->pci.dev);
    amdvi_init(s);
}
static void amdvi_realize(DeviceState *dev, Error **err)
{
    int ret = 0;
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    PCMachineState *pcms =
        PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE));
    PCIBus *bus;

    if (!pcms) {
        error_setg(err, "Machine-type '%s' not supported by amd-iommu",
                   mc->name);
        return;
    }

    bus = pcms->bus;
    s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
                                     amdvi_uint64_equal, g_free, g_free);

    /* This device should take care of IOMMU PCI properties */
    x86_iommu->type = TYPE_AMD;
    qdev_set_parent_bus(DEVICE(&s->pci), &bus->qbus);
    object_property_set_bool(OBJECT(&s->pci), true, "realized", err);
    ret = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0,
                             AMDVI_CAPAB_SIZE, err);
    if (ret < 0) {
        return;
    }
    s->capab_offset = ret;

    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0,
                             AMDVI_CAPAB_REG_SIZE, err);
    if (ret < 0) {
        return;
    }
    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0,
                             AMDVI_CAPAB_REG_SIZE, err);
    if (ret < 0) {
        return;
    }

    /* set up MMIO */
    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio",
                          AMDVI_MMIO_SIZE);

    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR);
    pci_setup_iommu(bus, amdvi_host_dma_iommu, s);
    s->devid = object_property_get_int(OBJECT(&s->pci), "addr", err);
    msi_init(&s->pci.dev, 0, 1, true, false, err);
    amdvi_init(s);
}
static const VMStateDescription vmstate_amdvi = {
    .name = "amd-iommu",
    .unmigratable = 1
};
static void amdvi_instance_init(Object *klass)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(klass);

    object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI);
}
static void amdvi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *dc_class = X86_IOMMU_CLASS(klass);

    dc->reset = amdvi_reset;
    dc->vmsd = &vmstate_amdvi;
    dc->hotpluggable = false;
    dc_class->realize = amdvi_realize;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
}
static const TypeInfo amdvi = {
    .name = TYPE_AMD_IOMMU_DEVICE,
    .parent = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(AMDVIState),
    .instance_init = amdvi_instance_init,
    .class_init = amdvi_class_init
};
static const TypeInfo amdviPCI = {
    .name = "AMDVI-PCI",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(AMDVIPCIState),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};
static void amdvi_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = amdvi_translate;
    imrc->notify_flag_changed = amdvi_iommu_notify_flag_changed;
}

static const TypeInfo amdvi_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_AMD_IOMMU_MEMORY_REGION,
    .class_init = amdvi_iommu_memory_region_class_init,
};
static void amdviPCI_register_types(void)
{
    type_register_static(&amdviPCI);
    type_register_static(&amdvi);
    type_register_static(&amdvi_iommu_memory_region_info);
}

type_init(amdviPCI_register_types);