/*
 * QEMU emulation of AMD IOMMU (AMD-Vi)
 *
 * Copyright (C) 2011 Eduard - Gabriel Munteanu
 * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Cache implementation inspired by hw/i386/intel_iommu.c
 */
#include "qemu/osdep.h"
#include "hw/i386/amd_iommu.h"
#include "qemu/error-report.h"
#include "trace.h"

/* used AMD-Vi MMIO registers */
const char *amdvi_mmio_low[] = {
    "AMDVI_MMIO_DEVTAB_BASE",
    "AMDVI_MMIO_CMDBUF_BASE",
    "AMDVI_MMIO_EVTLOG_BASE",
    "AMDVI_MMIO_CONTROL",
    "AMDVI_MMIO_EXCL_BASE",
    "AMDVI_MMIO_EXCL_LIMIT",
    "AMDVI_MMIO_EXT_FEATURES",
    "AMDVI_MMIO_PPR_BASE",
    "UNHANDLED"
};
const char *amdvi_mmio_high[] = {
    "AMDVI_MMIO_COMMAND_HEAD",
    "AMDVI_MMIO_COMMAND_TAIL",
    "AMDVI_MMIO_EVTLOG_HEAD",
    "AMDVI_MMIO_EVTLOG_TAIL",
    "AMDVI_MMIO_STATUS",
    "AMDVI_MMIO_PPR_HEAD",
    "AMDVI_MMIO_PPR_TAIL",
    "UNHANDLED"
};

struct AMDVIAddressSpace {
    uint8_t bus_num;            /* bus number                           */
    uint8_t devfn;              /* device function                      */
    AMDVIState *iommu_state;    /* AMDVI - one per machine              */
    MemoryRegion iommu;         /* Device's address translation region  */
    MemoryRegion iommu_ir;      /* Device's interrupt remapping region  */
    AddressSpace as;            /* device's corresponding address space */
};

/* AMDVI cache entry */
typedef struct AMDVIIOTLBEntry {
    uint16_t domid;             /* assigned domain id   */
    uint16_t devid;             /* device owning entry  */
    uint64_t perms;             /* access permissions   */
    uint64_t translated_addr;   /* translated address   */
    uint64_t page_mask;         /* physical page size   */
} AMDVIIOTLBEntry;

/* configure MMIO registers at startup/reset */
static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val,
                           uint64_t romask, uint64_t w1cmask)
{
    stq_le_p(&s->mmior[addr], val);
    stq_le_p(&s->romask[addr], romask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static uint16_t amdvi_readw(AMDVIState *s, hwaddr addr)
{
    return lduw_le_p(&s->mmior[addr]);
}

static uint32_t amdvi_readl(AMDVIState *s, hwaddr addr)
{
    return ldl_le_p(&s->mmior[addr]);
}

static uint64_t amdvi_readq(AMDVIState *s, hwaddr addr)
{
    return ldq_le_p(&s->mmior[addr]);
}

static void amdvi_writeq_raw(AMDVIState *s, uint64_t val, hwaddr addr)
{
    stq_le_p(&s->mmior[addr], val);
}

static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
{
    uint16_t romask = lduw_le_p(&s->romask[addr]);
    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
    stw_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
{
    uint32_t romask = ldl_le_p(&s->romask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
    stl_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    uint64_t romask = ldq_le_p(&s->romask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    uint64_t oldval = ldq_le_p(&s->mmior[addr]);
    stq_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

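/*
 * The guest-visible writes above apply the update rule
 *     new = ((old & romask) | (val & ~romask)) & ~(val & w1cmask)
 * i.e. bits covered by romask keep their old value, the remaining bits take
 * the guest-supplied value, and writing 1 to a bit covered by w1cmask clears
 * it. For example, with romask = 0xF0, w1cmask = 0x01, old = 0xA5 and
 * val = 0x0B, the stored result is (0xA0 | 0x0B) & ~0x01 = 0xAA.
 */
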
/* test whether any of the given bits are set in a 64-bit register */
static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val)
{
    return amdvi_readq(s, addr) & val;
}

/* OR a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_orq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, amdvi_readq(s, addr) | val, addr);
}

/* AND a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_andq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, amdvi_readq(s, addr) & val, addr);
}

static void amdvi_generate_msi_interrupt(AMDVIState *s)
{
    MSIMessage msg = {};
    MemTxAttrs attrs = {
        .requester_id = pci_requester_id(&s->pci.dev)
    };

    if (msi_enabled(&s->pci.dev)) {
        msg = msi_get_message(&s->pci.dev, 0);
        address_space_stl_le(&address_space_memory, msg.address, msg.data,
                             attrs, NULL);
    }
}

static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
{
    /* event logging not enabled */
    if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS,
        AMDVI_MMIO_STATUS_EVT_OVF)) {
        return;
    }

    /* event log buffer full */
    if (s->evtlog_tail >= s->evtlog_len) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
        return;
    }

    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
        evt, AMDVI_EVENT_LEN)) {
        trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
    }

    s->evtlog_tail += AMDVI_EVENT_LEN;
    amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
    amdvi_generate_msi_interrupt(s);
}

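/*
 * The event log is a ring in guest memory: each entry is AMDVI_EVENT_LEN
 * bytes, the device advances the tail after writing an entry, and once the
 * tail reaches the configured length the overflow bit is set in the status
 * register and an MSI is generated instead of writing further entries.
 */
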
static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start,
                                int length)
{
    int index = start / 64, bitpos = start % 64;
    uint64_t mask = MAKE_64BIT_MASK(start, length);
    buffer[index] &= ~mask;
    buffer[index] |= (value << bitpos) & mask;
}

/*
 * AMDVi event structure
 *    0:15   -> DeviceID
 *    55:63  -> event type + miscellaneous info
 *    63:127 -> related address
 */
static void amdvi_encode_event(uint64_t *evt, uint16_t devid, uint64_t addr,
                               uint16_t info)
{
    amdvi_setevent_bits(evt, devid, 0, 16);
    amdvi_setevent_bits(evt, info, 55, 8);
    amdvi_setevent_bits(evt, addr, 63, 64);
}

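/*
 * Per the encoding above, the device ID is placed in bits 0:15, the event
 * type/info field in bits 55:62, and the related address starting at bit 63
 * of the four-quadword event buffer that callers pass to amdvi_log_event().
 */
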
/* log an error encountered during a page walk
 *
 * @addr: virtual address in translation request
 */
static void amdvi_page_fault(AMDVIState *s, uint16_t devid,
                             hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}

/*
 * log a master abort accessing device table
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid,
                                   hwaddr devtab, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_DEV_TAB_HW_ERROR;

    amdvi_encode_event(evt, devid, devtab, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}

/* log an event trying to access command buffer
 *   @addr : address that couldn't be accessed
 */
static void amdvi_log_command_error(AMDVIState *s, hwaddr addr)
{
    uint64_t evt[4], info = AMDVI_EVENT_COMMAND_HW_ERROR;

    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}

/* log an illegal command event
 *   @addr : address of illegal command
 */
static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
                                       hwaddr addr)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_COMMAND_ERROR;
    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
}

/* log an error accessing device table
 *
 *  @devid : device owning the table entry
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid,
                                          hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
}

/* log an error accessing a PTE entry
 * @addr : address that couldn't be accessed
 */
static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid,
                                    hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}

static gboolean amdvi_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint amdvi_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static AMDVIIOTLBEntry *amdvi_iotlb_lookup(AMDVIState *s, hwaddr addr,
                                           uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    return g_hash_table_lookup(s->iotlb, &key);
}

static void amdvi_iotlb_reset(AMDVIState *s)
{
    assert(s->iotlb);
    trace_amdvi_iotlb_reset();
    g_hash_table_remove_all(s->iotlb);
}

static gboolean amdvi_iotlb_remove_by_devid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t devid = *(uint16_t *)user_data;
    return entry->devid == devid;
}

static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr,
                                    uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    g_hash_table_remove(s->iotlb, &key);
}

static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
                               uint64_t gpa, IOMMUTLBEntry to_cache,
                               uint16_t domid)
{
    AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
    uint64_t *key = g_new(uint64_t, 1);
    uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;

    /* don't cache erroneous translations */
    if (to_cache.perm != IOMMU_NONE) {
        trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid),
                PCI_FUNC(devid), gpa, to_cache.translated_addr);

        if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) {
            amdvi_iotlb_reset(s);
        }

        entry->domid = domid;
        entry->perms = to_cache.perm;
        entry->translated_addr = to_cache.translated_addr;
        entry->page_mask = to_cache.addr_mask;
        *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
        g_hash_table_replace(s->iotlb, key, entry);
    }
}

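/*
 * The IOTLB key mirrors the one used in amdvi_iotlb_lookup(): the guest
 * frame number (gpa >> 12) combined with the device ID shifted by
 * AMDVI_DEVID_SHIFT, so cached translations from different devices can
 * never collide.
 */
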
static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
{
    /* pad the last 3 bits */
    hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3;
    uint64_t data = cpu_to_le64(cmd[1]);

    if (extract64(cmd[0], 51, 8)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    if (extract64(cmd[0], 0, 1)) {
        if (dma_memory_write(&address_space_memory, addr, &data,
            AMDVI_COMPLETION_DATA_SIZE)) {
            trace_amdvi_completion_wait_fail(addr);
        }
    }
    /* set completion interrupt */
    if (extract64(cmd[0], 1, 1)) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
    }
    trace_amdvi_completion_wait(addr, data);
}

/* log error without aborting since Linux seems to be using reserved bits */
static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16));

    /* This command should invalidate internal caches, of which there are none */
    if (extract64(cmd[0], 15, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_devtab_inval(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                             PCI_FUNC(devid));
}

static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 15, 16) || extract64(cmd[0], 19, 8) ||
        extract64(cmd[1], 0, 2) || extract64(cmd[1], 3, 29)
        || extract64(cmd[1], 47, 16)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_ppr_exec();
}

static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 0, 60) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    amdvi_iotlb_reset(s);
    trace_amdvi_all_inval();
}

static gboolean amdvi_iotlb_remove_by_domid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t domid = *(uint16_t *)user_data;
    return entry->domid == domid;
}

/* we don't have devid - we can't remove pages by address */
static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
{
    uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16));

    if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 16, 12) ||
        extract64(cmd[0], 3, 10)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid,
                                &domid);
    trace_amdvi_pages_inval(domid);
}

static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 20, 8) ||
        extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
        extract64(cmd[1], 5, 7)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    trace_amdvi_prefetch_pages();
}

static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }
    trace_amdvi_intr_inval();
}

/* FIXME: Try to work with the specified size instead of all the pages
 * when the S bit is on
 */
static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = extract64(cmd[0], 0, 16);
    if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 9)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    if (extract64(cmd[1], 0, 1)) {
        g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_devid,
                                    &devid);
    } else {
        amdvi_iotlb_remove_page(s, cpu_to_le64(extract64(cmd[1], 12, 52)) << 12,
                                cpu_to_le16(extract64(cmd[1], 0, 16)));
    }
    trace_amdvi_iotlb_inval();
}

/* not honouring reserved bits is regarded as an illegal command */
static void amdvi_cmdbuf_exec(AMDVIState *s)
{
    uint64_t cmd[2];

    if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head,
        cmd, AMDVI_COMMAND_SIZE)) {
        trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head);
        amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
        return;
    }

    switch (extract64(cmd[0], 60, 4)) {
    case AMDVI_CMD_COMPLETION_WAIT:
        amdvi_completion_wait(s, cmd);
        break;
    case AMDVI_CMD_INVAL_DEVTAB_ENTRY:
        amdvi_inval_devtab_entry(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_PAGES:
        amdvi_inval_pages(s, cmd);
        break;
    case AMDVI_CMD_INVAL_IOTLB_PAGES:
        iommu_inval_iotlb(s, cmd);
        break;
    case AMDVI_CMD_INVAL_INTR_TABLE:
        amdvi_inval_inttable(s, cmd);
        break;
    case AMDVI_CMD_PREFETCH_AMDVI_PAGES:
        amdvi_prefetch_pages(s, cmd);
        break;
    case AMDVI_CMD_COMPLETE_PPR_REQUEST:
        amdvi_complete_ppr(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_ALL:
        amdvi_inval_all(s, cmd);
        break;
    default:
        trace_amdvi_unhandled_command(extract64(cmd[1], 60, 4));
        /* log illegal command */
        amdvi_log_illegalcom_error(s, extract64(cmd[1], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
}

static void amdvi_cmdbuf_run(AMDVIState *s)
{
    if (!s->cmdbuf_enabled) {
        trace_amdvi_command_error(amdvi_readq(s, AMDVI_MMIO_CONTROL));
        return;
    }

    /* check if there is work to do. */
    while (s->cmdbuf_head != s->cmdbuf_tail) {
        trace_amdvi_command_exec(s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
        amdvi_cmdbuf_exec(s);
        s->cmdbuf_head += AMDVI_COMMAND_SIZE;
        amdvi_writeq_raw(s, s->cmdbuf_head, AMDVI_MMIO_COMMAND_HEAD);

        /* wrap head pointer */
        if (s->cmdbuf_head >= s->cmdbuf_len * AMDVI_COMMAND_SIZE) {
            s->cmdbuf_head = 0;
        }
    }
}

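/*
 * The command buffer is a ring of AMDVI_COMMAND_SIZE (16-byte) entries in
 * guest memory. The guest advances the tail; the loop above consumes
 * commands until the head catches up, mirroring the head back into the
 * COMMAND_HEAD MMIO register and wrapping once it reaches
 * cmdbuf_len * AMDVI_COMMAND_SIZE.
 */
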
static void amdvi_mmio_trace(hwaddr addr, unsigned size)
{
    uint8_t index = (addr & ~0x2000) / 8;

    if ((addr & 0x2000)) {
        /* high table */
        index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index;
        trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
    } else {
        index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index;
        trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
    }
}

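/*
 * Register names are split into two banks: offsets below 0x2000 index
 * amdvi_mmio_low[] and offsets with bit 0x2000 set index amdvi_mmio_high[].
 * Out-of-range indexes are clamped so they land on the trailing "UNHANDLED"
 * entry rather than reading past the array.
 */
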
static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    AMDVIState *s = opaque;

    uint64_t val = -1;
    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_read("error: addr outside region: max ",
                (uint64_t)AMDVI_MMIO_SIZE, addr, size);
        return (uint64_t)-1;
    }

    if (size == 2) {
        val = amdvi_readw(s, addr);
    } else if (size == 4) {
        val = amdvi_readl(s, addr);
    } else if (size == 8) {
        val = amdvi_readq(s, addr);
    }
    amdvi_mmio_trace(addr, size);

    return val;
}

static void amdvi_handle_control_write(AMDVIState *s)
{
    unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL);
    s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN);

    s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN);
    s->evtlog_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_EVENTLOGEN);

    s->evtlog_intr = !!(control & AMDVI_MMIO_CONTROL_EVENTINTEN);
    s->completion_wait_intr = !!(control & AMDVI_MMIO_CONTROL_COMWAITINTEN);
    s->cmdbuf_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_CMDBUFLEN);

    /* update the flags depending on the control register */
    if (s->cmdbuf_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_CMDBUF_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_CMDBUF_RUN);
    }
    if (s->evtlog_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_EVT_RUN);
    }

    trace_amdvi_control_status(control);
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_devtab_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_DEVICE_TABLE);
    s->devtab = (val & AMDVI_MMIO_DEVTAB_BASE_MASK);

    /* set device table length */
    s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1 *
                    (AMDVI_MMIO_DEVTAB_SIZE_UNIT /
                     AMDVI_MMIO_DEVTAB_ENTRY_SIZE));
}

static inline void amdvi_handle_cmdhead_write(AMDVIState *s)
{
    s->cmdbuf_head = amdvi_readq(s, AMDVI_MMIO_COMMAND_HEAD)
                     & AMDVI_MMIO_CMDBUF_HEAD_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_cmdbase_write(AMDVIState *s)
{
    s->cmdbuf = amdvi_readq(s, AMDVI_MMIO_COMMAND_BASE)
                & AMDVI_MMIO_CMDBUF_BASE_MASK;
    s->cmdbuf_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_CMDBUF_SIZE_BYTE)
                    & AMDVI_MMIO_CMDBUF_SIZE_MASK);
    s->cmdbuf_head = s->cmdbuf_tail = 0;
}

static inline void amdvi_handle_cmdtail_write(AMDVIState *s)
{
    s->cmdbuf_tail = amdvi_readq(s, AMDVI_MMIO_COMMAND_TAIL)
                     & AMDVI_MMIO_CMDBUF_TAIL_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_excllim_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EXCL_LIMIT);
    s->excl_limit = (val & AMDVI_MMIO_EXCL_LIMIT_MASK) |
                    AMDVI_MMIO_EXCL_LIMIT_LOW;
}

static inline void amdvi_handle_evtbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE);
    s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK;
    s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE)
                    & AMDVI_MMIO_EVTLOG_SIZE_MASK);
}

static inline void amdvi_handle_evttail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_TAIL);
    s->evtlog_tail = val & AMDVI_MMIO_EVTLOG_TAIL_MASK;
}

static inline void amdvi_handle_evthead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_HEAD);
    s->evtlog_head = val & AMDVI_MMIO_EVTLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_BASE);
    s->ppr_log = val & AMDVI_MMIO_PPRLOG_BASE_MASK;
    s->pprlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_PPRLOG_SIZE_BYTE)
                    & AMDVI_MMIO_PPRLOG_SIZE_MASK);
}

static inline void amdvi_handle_pprhead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_HEAD);
    s->pprlog_head = val & AMDVI_MMIO_PPRLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprtail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_TAIL);
    s->pprlog_tail = val & AMDVI_MMIO_PPRLOG_TAIL_MASK;
}

/* FIXME: something might go wrong if System Software writes in chunks
 * of one byte but Linux writes in chunks of 4 bytes, so currently it
 * works correctly with Linux but will definitely be broken if software
 * reads/writes 8 bytes
 */
static void amdvi_mmio_reg_write(AMDVIState *s, unsigned size, uint64_t val,
                                 hwaddr addr)
{
    if (size == 2) {
        amdvi_writew(s, addr, val);
    } else if (size == 4) {
        amdvi_writel(s, addr, val);
    } else if (size == 8) {
        amdvi_writeq(s, addr, val);
    }
}

static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                             unsigned size)
{
    AMDVIState *s = opaque;
    unsigned long offset = addr & 0x07;

    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_write("error: addr outside region: max ",
                (uint64_t)AMDVI_MMIO_SIZE, size, val, offset);
        return;
    }

    amdvi_mmio_trace(addr, size);
    switch (addr & ~0x07) {
    case AMDVI_MMIO_CONTROL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_control_write(s);
        break;
    case AMDVI_MMIO_DEVICE_TABLE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* set device table address
         * This also suffers from inability to tell whether software
         * is done writing
         */
        if (offset || (size == 8)) {
            amdvi_handle_devtab_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdhead_write(s);
        break;
    case AMDVI_MMIO_COMMAND_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* FIXME - make sure System Software has finished writing in case
         * it writes in chunks less than 8 bytes in a robust way. As for
         * now, this hack works for the Linux driver
         */
        if (offset || (size == 8)) {
            amdvi_handle_cmdbase_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdtail_write(s);
        break;
    case AMDVI_MMIO_EVENT_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evtbase_write(s);
        break;
    case AMDVI_MMIO_EVENT_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evthead_write(s);
        break;
    case AMDVI_MMIO_EVENT_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evttail_write(s);
        break;
    case AMDVI_MMIO_EXCL_LIMIT:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_excllim_write(s);
        break;
        /* PPR log base - unused for now */
    case AMDVI_MMIO_PPR_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprbase_write(s);
        break;
        /* PPR log head - also unused for now */
    case AMDVI_MMIO_PPR_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprhead_write(s);
        break;
        /* PPR log tail - unused for now */
    case AMDVI_MMIO_PPR_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprtail_write(s);
        break;
    }
}

static inline uint64_t amdvi_get_perms(uint64_t entry)
{
    return (entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE)) >>
           AMDVI_DEV_PERM_SHIFT;
}

/* a valid entry should have V = 1 and reserved bits honoured */
static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid,
                               uint64_t *dte)
{
    if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED)
        || (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED)
        || (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
        amdvi_log_illegaldevtab_error(s, devid,
                                      s->devtab +
                                      devid * AMDVI_DEVTAB_ENTRY_SIZE, 0);
        return false;
    }

    return dte[0] & AMDVI_DEV_VALID;
}

/* get a device table entry given the devid */
static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry)
{
    uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE;

    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
        AMDVI_DEVTAB_ENTRY_SIZE)) {
        trace_amdvi_dte_get_fail(s->devtab, offset);
        /* log error accessing dte */
        amdvi_log_devtab_error(s, devid, s->devtab + offset, 0);
        return false;
    }

    *entry = le64_to_cpu(*entry);
    if (!amdvi_validate_dte(s, devid, entry)) {
        trace_amdvi_invalid_dte(entry[0]);
        return false;
    }

    return true;
}

/* get pte translation mode */
static inline uint8_t get_pte_translation_mode(uint64_t pte)
{
    return (pte >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK;
}

static inline uint64_t pte_override_page_mask(uint64_t pte)
{
    uint8_t page_mask = 12;
    uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) ^ AMDVI_DEV_PT_ROOT_MASK;
    /* find the first zero bit */
    while (addr & 1) {
        page_mask++;
        addr = addr >> 1;
    }

    return ~((1ULL << page_mask) - 1);
}

static inline uint64_t pte_get_page_mask(uint64_t oldlevel)
{
    return ~((1UL << ((oldlevel * 9) + 3)) - 1);
}

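/*
 * Each step down the page table consumes 9 bits of the IO virtual address,
 * so a PTE reached while descending from level oldlevel maps a region of
 * 2^(oldlevel * 9 + 3) bytes: 4K at level 1, 2M at level 2, 1G at level 3,
 * which is what pte_get_page_mask() encodes.
 */
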
static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr,
                                           uint16_t devid)
{
    uint64_t pte;

    if (dma_memory_read(&address_space_memory, pte_addr, &pte, sizeof(pte))) {
        trace_amdvi_get_pte_hwerror(pte_addr);
        amdvi_log_pagetab_error(s, devid, pte_addr, 0);
        pte = 0;
        return pte;
    }

    pte = le64_to_cpu(pte);
    return pte;
}

static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte,
                            IOMMUTLBEntry *ret, unsigned perms,
                            hwaddr addr)
{
    unsigned level, present, pte_perms, oldlevel;
    uint64_t pte = dte[0], pte_addr, page_mask;

    /* make sure the DTE has TV = 1 */
    if (pte & AMDVI_DEV_TRANSLATION_VALID) {
        level = get_pte_translation_mode(pte);
        if (level >= 7) {
            trace_amdvi_mode_invalid(level, addr);
            return;
        }
        if (level == 0) {
            goto no_remap;
        }

        /* we are at the leaf page table or page table encodes a huge page */
        while (level > 0) {
            pte_perms = amdvi_get_perms(pte);
            present = pte & 1;
            if (!present || perms != (perms & pte_perms)) {
                amdvi_page_fault(as->iommu_state, as->devfn, addr, perms);
                trace_amdvi_page_fault(addr);
                return;
            }

            /* go to the next lower level */
            pte_addr = pte & AMDVI_DEV_PT_ROOT_MASK;
            /* add offset and load pte */
            pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
            pte = amdvi_get_pte_entry(as->iommu_state, pte_addr, as->devfn);
            if (!pte) {
                return;
            }
            oldlevel = level;
            level = get_pte_translation_mode(pte);

            if (level == 0x7) {
                break;
            }
        }

        if (level == 0x7) {
            page_mask = pte_override_page_mask(pte);
        } else {
            page_mask = pte_get_page_mask(oldlevel);
        }

        /* get access permissions from pte */
        ret->iova = addr & page_mask;
        ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & page_mask;
        ret->addr_mask = ~page_mask;
        ret->perm = amdvi_get_perms(pte);
        return;
    }
no_remap:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = amdvi_get_perms(pte);
}

static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr,
                               bool is_write, IOMMUTLBEntry *ret)
{
    AMDVIState *s = as->iommu_state;
    uint16_t devid = PCI_BUILD_BDF(as->bus_num, as->devfn);
    AMDVIIOTLBEntry *iotlb_entry = amdvi_iotlb_lookup(s, addr, devid);
    uint64_t entry[4];

    if (iotlb_entry) {
        trace_amdvi_iotlb_hit(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
        ret->iova = addr & ~iotlb_entry->page_mask;
        ret->translated_addr = iotlb_entry->translated_addr;
        ret->addr_mask = iotlb_entry->page_mask;
        ret->perm = iotlb_entry->perms;
        return;
    }

    /* devices with V = 0 are not translated */
    if (!amdvi_get_dte(s, devid, entry)) {
        goto out;
    }

    amdvi_page_walk(as, entry, ret,
                    is_write ? AMDVI_PERM_WRITE : AMDVI_PERM_READ, addr);

    amdvi_update_iotlb(s, devid, addr, *ret,
                       entry[1] & AMDVI_DEV_DOMID_ID_MASK);
    return;

out:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = IOMMU_RW;
}

static inline bool amdvi_is_interrupt_addr(hwaddr addr)
{
    return addr >= AMDVI_INT_ADDR_FIRST && addr <= AMDVI_INT_ADDR_LAST;
}

static IOMMUTLBEntry amdvi_translate(MemoryRegion *iommu, hwaddr addr,
                                     bool is_write)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
    AMDVIState *s = as->iommu_state;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE
    };

    if (!s->enabled) {
        /* AMDVI disabled - corresponds to iommu=off not
         * failure to provide any parameter
         */
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_RW;
        return ret;
    } else if (amdvi_is_interrupt_addr(addr)) {
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_WO;
        return ret;
    }

    amdvi_do_translate(as, addr, is_write, &ret);
    trace_amdvi_translation_result(as->bus_num, PCI_SLOT(as->devfn),
            PCI_FUNC(as->devfn), addr, ret.translated_addr);
    return ret;
}

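/*
 * Two cases above bypass the page walk entirely: with the IOMMU disabled
 * every access is identity-mapped read/write, and accesses that fall in the
 * interrupt address window are identity-mapped write-only so MSI writes can
 * still reach the interrupt delivery range.
 */
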
static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    AMDVIState *s = opaque;
    AMDVIAddressSpace **iommu_as;
    int bus_num = pci_bus_num(bus);

    iommu_as = s->address_spaces[bus_num];

    /* allocate memory during the first run */
    if (!iommu_as) {
        iommu_as = g_malloc0(sizeof(AMDVIAddressSpace *) * PCI_DEVFN_MAX);
        s->address_spaces[bus_num] = iommu_as;
    }

    /* set up AMD-Vi region */
    if (!iommu_as[devfn]) {
        iommu_as[devfn] = g_malloc0(sizeof(AMDVIAddressSpace));
        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
        iommu_as[devfn]->devfn = (uint8_t)devfn;
        iommu_as[devfn]->iommu_state = s;

        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
                           "amd-iommu");
    }
    return &iommu_as[devfn]->as;
}

static const MemoryRegionOps mmio_mem_ops = {
    .read = amdvi_mmio_read,
    .write = amdvi_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    }
};

static void amdvi_iommu_notify_flag_changed(MemoryRegion *iommu,
                                            IOMMUNotifierFlag old,
                                            IOMMUNotifierFlag new)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);

    if (new & IOMMU_NOTIFIER_MAP) {
        error_report("device %02x.%02x.%x requires iommu notifier which is not "
                     "currently supported", as->bus_num, PCI_SLOT(as->devfn),
                     PCI_FUNC(as->devfn));
        exit(1);
    }
}

static void amdvi_init(AMDVIState *s)
{
    amdvi_iotlb_reset(s);

    s->iommu_ops.translate = amdvi_translate;
    s->iommu_ops.notify_flag_changed = amdvi_iommu_notify_flag_changed;
    s->excl_enabled = false;
    s->excl_allow = false;
    s->mmio_enabled = false;
    s->enabled = false;
    s->ats_enabled = false;
    s->cmdbuf_enabled = false;

    /* reset MMIO */
    memset(s->mmior, 0, AMDVI_MMIO_SIZE);
    amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES,
            0xffffffffffffffef, 0);
    amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67);

    /* reset device ident */
    pci_config_set_vendor_id(s->pci.dev.config, PCI_VENDOR_ID_AMD);
    pci_config_set_prog_interface(s->pci.dev.config, 00);
    pci_config_set_device_id(s->pci.dev.config, s->devid);
    pci_config_set_class(s->pci.dev.config, 0x0806);

    /* reset AMDVI specific capabilities, all r/o */
    pci_set_long(s->pci.dev.config + s->capab_offset, AMDVI_CAPAB_FEATURES);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_LOW,
                 s->mmio.addr & ~(0xffff0000));
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH,
                 (s->mmio.addr & ~(0xffff)) >> 16);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_RANGE,
                 0xff000000);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, 0);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC,
            AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR);
}

static void amdvi_reset(DeviceState *dev)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);

    msi_reset(&s->pci.dev);
    amdvi_init(s);
}

static void amdvi_realize(DeviceState *dev, Error **err)
{
    int ret = 0;
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
    PCIBus *bus = PC_MACHINE(qdev_get_machine())->bus;
    s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
                                     amdvi_uint64_equal, g_free, g_free);

    /* This device should take care of IOMMU PCI properties */
    x86_iommu->type = TYPE_AMD;
    qdev_set_parent_bus(DEVICE(&s->pci), &bus->qbus);
    object_property_set_bool(OBJECT(&s->pci), true, "realized", err);
    s->capab_offset = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0,
                                         AMDVI_CAPAB_SIZE);
    assert(s->capab_offset > 0);
    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0, AMDVI_CAPAB_REG_SIZE);
    assert(ret > 0);
    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0, AMDVI_CAPAB_REG_SIZE);
    assert(ret > 0);

    /* set up MMIO */
    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio",
                          AMDVI_MMIO_SIZE);

    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR);
    pci_setup_iommu(bus, amdvi_host_dma_iommu, s);
    s->devid = object_property_get_int(OBJECT(&s->pci), "addr", err);
    msi_init(&s->pci.dev, 0, 1, true, false, err);
    amdvi_init(s);
}

static const VMStateDescription vmstate_amdvi = {
    .name = "amd-iommu",
    .unmigratable = 1
};

static void amdvi_instance_init(Object *klass)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(klass);

    object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI);
}

static void amdvi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *dc_class = X86_IOMMU_CLASS(klass);

    dc->reset = amdvi_reset;
    dc->vmsd = &vmstate_amdvi;
    dc->hotpluggable = false;
    dc_class->realize = amdvi_realize;
}

static const TypeInfo amdvi = {
    .name = TYPE_AMD_IOMMU_DEVICE,
    .parent = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(AMDVIState),
    .instance_init = amdvi_instance_init,
    .class_init = amdvi_class_init
};

static const TypeInfo amdviPCI = {
    .name = "AMDVI-PCI",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(AMDVIPCIState),
};

static void amdviPCI_register_types(void)
{
    type_register_static(&amdviPCI);
    type_register_static(&amdvi);
}

type_init(amdviPCI_register_types);