hw/i386/amd_iommu.c
/*
 * QEMU emulation of AMD IOMMU (AMD-Vi)
 *
 * Copyright (C) 2011 Eduard - Gabriel Munteanu
 * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Cache implementation inspired by hw/i386/intel_iommu.c
 */

#include "qemu/osdep.h"
#include "hw/i386/pc.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci_bus.h"
#include "amd_iommu.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"
/* used AMD-Vi MMIO registers */
const char *amdvi_mmio_low[] = {
    "AMDVI_MMIO_DEVTAB_BASE",
    "AMDVI_MMIO_CMDBUF_BASE",
    "AMDVI_MMIO_EVTLOG_BASE",
    "AMDVI_MMIO_CONTROL",
    "AMDVI_MMIO_EXCL_BASE",
    "AMDVI_MMIO_EXCL_LIMIT",
    "AMDVI_MMIO_EXT_FEATURES",
    "AMDVI_MMIO_PPR_BASE",
    "UNHANDLED"
};

const char *amdvi_mmio_high[] = {
    "AMDVI_MMIO_COMMAND_HEAD",
    "AMDVI_MMIO_COMMAND_TAIL",
    "AMDVI_MMIO_EVTLOG_HEAD",
    "AMDVI_MMIO_EVTLOG_TAIL",
    "AMDVI_MMIO_STATUS",
    "AMDVI_MMIO_PPR_HEAD",
    "AMDVI_MMIO_PPR_TAIL",
    "UNHANDLED"
};

struct AMDVIAddressSpace {
    uint8_t bus_num;            /* bus number                           */
    uint8_t devfn;              /* device function                      */
    AMDVIState *iommu_state;    /* AMDVI - one per machine              */
    IOMMUMemoryRegion iommu;    /* Device's address translation region  */
    MemoryRegion iommu_ir;      /* Device's interrupt remapping region  */
    AddressSpace as;            /* device's corresponding address space */
};

/* AMDVI cache entry */
typedef struct AMDVIIOTLBEntry {
    uint16_t domid;             /* assigned domain id   */
    uint16_t devid;             /* device owning entry  */
    uint64_t perms;             /* access permissions   */
    uint64_t translated_addr;   /* translated address   */
    uint64_t page_mask;         /* physical page size   */
} AMDVIIOTLBEntry;
/* configure MMIO registers at startup/reset */
static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val,
                           uint64_t romask, uint64_t w1cmask)
{
    stq_le_p(&s->mmior[addr], val);
    stq_le_p(&s->romask[addr], romask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static uint16_t amdvi_readw(AMDVIState *s, hwaddr addr)
{
    return lduw_le_p(&s->mmior[addr]);
}

static uint32_t amdvi_readl(AMDVIState *s, hwaddr addr)
{
    return ldl_le_p(&s->mmior[addr]);
}

static uint64_t amdvi_readq(AMDVIState *s, hwaddr addr)
{
    return ldq_le_p(&s->mmior[addr]);
}

/* internal write */
static void amdvi_writeq_raw(AMDVIState *s, uint64_t val, hwaddr addr)
{
    stq_le_p(&s->mmior[addr], val);
}

/* external write */
static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
{
    uint16_t romask = lduw_le_p(&s->romask[addr]);
    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
    stw_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
{
    uint32_t romask = ldl_le_p(&s->romask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
    stl_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}
static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    uint64_t romask = ldq_le_p(&s->romask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    uint64_t oldval = ldq_le_p(&s->mmior[addr]);
    stq_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}
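
/*
 * Note on the MMIO write helpers above: bits set in romask are read-only
 * to the guest (their previous value is preserved), while bits set in
 * w1cmask are "write 1 to clear".  Worked example of the formula used here:
 * oldval = 0x31, val = 0xFF, romask = 0xF0, w1cmask = 0x01 gives
 * ((0x31 & 0xF0) | (0xFF & ~0xF0)) & ~(0xFF & 0x01) = 0x3E.
 */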
/* test whether any of the given mask bits are set in a 64-bit register */
static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val)
{
    return amdvi_readq(s, addr) & val;
}

/* OR a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_orq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) | val);
}

/* AND a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_andq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) & val);
}
static void amdvi_generate_msi_interrupt(AMDVIState *s)
{
    MSIMessage msg = {};
    MemTxAttrs attrs = {
        .requester_id = pci_requester_id(&s->pci.dev)
    };

    if (msi_enabled(&s->pci.dev)) {
        msg = msi_get_message(&s->pci.dev, 0);
        address_space_stl_le(&address_space_memory, msg.address, msg.data,
                             attrs, NULL);
    }
}
static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
{
    /* event logging not enabled */
    if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS,
        AMDVI_MMIO_STATUS_EVT_OVF)) {
        return;
    }

    /* event log buffer full */
    if (s->evtlog_tail >= s->evtlog_len) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
        return;
    }

    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
        evt, AMDVI_EVENT_LEN)) {
        trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
    }

    s->evtlog_tail += AMDVI_EVENT_LEN;
    amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
    amdvi_generate_msi_interrupt(s);
}
static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start,
                                int length)
{
    int index = start / 64, bitpos = start % 64;
    uint64_t mask = MAKE_64BIT_MASK(start, length);
    buffer[index] &= ~mask;
    buffer[index] |= (value << bitpos) & mask;
}

/*
 * AMDVi event structure
 *    0:15   -> DeviceID
 *    55:63  -> event type + miscellaneous info
 *    63:127 -> related address
 */
static void amdvi_encode_event(uint64_t *evt, uint16_t devid, uint64_t addr,
                               uint16_t info)
{
    amdvi_setevent_bits(evt, devid, 0, 16);
    amdvi_setevent_bits(evt, info, 55, 8);
    amdvi_setevent_bits(evt, addr, 63, 64);
}
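
/*
 * Note: amdvi_setevent_bits() only touches the single 64-bit word selected
 * by start / 64, so a field is assumed not to straddle a word boundary.
 * amdvi_encode_event() packs the device id, the type/info bits and the
 * related address into the entry that amdvi_log_event() copies into the
 * guest event log.
 */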
/* log an error encountered during a page walk
 *
 * @addr: virtual address in translation request
 */
static void amdvi_page_fault(AMDVIState *s, uint16_t devid,
                             hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}

/*
 * log a master abort accessing device table
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid,
                                   hwaddr devtab, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_DEV_TAB_HW_ERROR;

    amdvi_encode_event(evt, devid, devtab, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
/* log an event trying to access command buffer
 *   @addr : address that couldn't be accessed
 */
static void amdvi_log_command_error(AMDVIState *s, hwaddr addr)
{
    uint64_t evt[4], info = AMDVI_EVENT_COMMAND_HW_ERROR;

    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}

/* log an illegal command event
 *   @addr : address of illegal command
 */
static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
                                       hwaddr addr)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_COMMAND_ERROR;
    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
}
/* log an error accessing device table
 *
 *  @devid : device owning the table entry
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid,
                                          hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
}

/* log an error accessing a PTE entry
 * @addr : address that couldn't be accessed
 */
static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid,
                                    hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
static gboolean amdvi_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint amdvi_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static AMDVIIOTLBEntry *amdvi_iotlb_lookup(AMDVIState *s, hwaddr addr,
                                           uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    return g_hash_table_lookup(s->iotlb, &key);
}

static void amdvi_iotlb_reset(AMDVIState *s)
{
    assert(s->iotlb);
    trace_amdvi_iotlb_reset();
    g_hash_table_remove_all(s->iotlb);
}

static gboolean amdvi_iotlb_remove_by_devid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t devid = *(uint16_t *)user_data;
    return entry->devid == devid;
}

static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr,
                                    uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    g_hash_table_remove(s->iotlb, &key);
}
static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
                               uint64_t gpa, IOMMUTLBEntry to_cache,
                               uint16_t domid)
{
    AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
    uint64_t *key = g_new(uint64_t, 1);
    uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;

    /* don't cache erroneous translations */
    if (to_cache.perm != IOMMU_NONE) {
        trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid),
                PCI_FUNC(devid), gpa, to_cache.translated_addr);

        if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) {
            amdvi_iotlb_reset(s);
        }

        entry->domid = domid;
        entry->perms = to_cache.perm;
        entry->translated_addr = to_cache.translated_addr;
        entry->page_mask = to_cache.addr_mask;
        *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
        g_hash_table_replace(s->iotlb, key, entry);
    }
}
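
/*
 * The IOTLB is a single GHashTable keyed by
 * (gpa >> AMDVI_PAGE_SHIFT_4K) | (devid << AMDVI_DEVID_SHIFT), i.e. one
 * entry per device and guest page.  Once it holds AMDVI_IOTLB_MAX_SIZE
 * entries the whole cache is flushed rather than evicted entry by entry.
 */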
static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
{
    /* pad the last 3 bits */
    hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3;
    uint64_t data = cpu_to_le64(cmd[1]);

    if (extract64(cmd[0], 51, 8)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    if (extract64(cmd[0], 0, 1)) {
        if (dma_memory_write(&address_space_memory, addr, &data,
            AMDVI_COMPLETION_DATA_SIZE)) {
            trace_amdvi_completion_wait_fail(addr);
        }
    }
    /* set completion interrupt */
    if (extract64(cmd[0], 1, 1)) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
    }
    trace_amdvi_completion_wait(addr, data);
}
/* log error without aborting since Linux seems to be using reserved bits */
static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16));

    /* This command should invalidate internal caches, of which there are none */
    if (extract64(cmd[0], 15, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_devtab_inval(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                             PCI_FUNC(devid));
}
static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 15, 16) || extract64(cmd[0], 19, 8) ||
        extract64(cmd[1], 0, 2) || extract64(cmd[1], 3, 29)
        || extract64(cmd[1], 47, 16)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_ppr_exec();
}

static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 0, 60) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    amdvi_iotlb_reset(s);
    trace_amdvi_all_inval();
}

static gboolean amdvi_iotlb_remove_by_domid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t domid = *(uint16_t *)user_data;
    return entry->domid == domid;
}

/* we don't have devid - we can't remove pages by address */
static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
{
    uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16));

    if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 16, 12) ||
        extract64(cmd[0], 3, 10)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid,
                                &domid);
    trace_amdvi_pages_inval(domid);
}
static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 20, 8) ||
        extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
        extract64(cmd[1], 5, 7)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    trace_amdvi_prefetch_pages();
}

static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    trace_amdvi_intr_inval();
}

/* FIXME: Try to work with the specified size instead of all the pages
 * when the S bit is on
 */
static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = extract64(cmd[0], 0, 16);
    if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 9)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    if (extract64(cmd[1], 0, 1)) {
        g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_devid,
                                    &devid);
    } else {
        amdvi_iotlb_remove_page(s, cpu_to_le64(extract64(cmd[1], 12, 52)) << 12,
                                cpu_to_le16(extract64(cmd[1], 0, 16)));
    }
    trace_amdvi_iotlb_inval();
}
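
/*
 * INVALIDATE_IOTLB_PAGES either drops every cached translation owned by
 * the device (when bit 0 of cmd[1] is set) or removes the single page
 * named in the command; as the FIXME above notes, the size (S) bit is
 * not honoured yet.
 */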
/* not honouring reserved bits is regarded as an illegal command */
static void amdvi_cmdbuf_exec(AMDVIState *s)
{
    uint64_t cmd[2];

    if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head,
        cmd, AMDVI_COMMAND_SIZE)) {
        trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head);
        amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
        return;
    }

    switch (extract64(cmd[0], 60, 4)) {
    case AMDVI_CMD_COMPLETION_WAIT:
        amdvi_completion_wait(s, cmd);
        break;
    case AMDVI_CMD_INVAL_DEVTAB_ENTRY:
        amdvi_inval_devtab_entry(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_PAGES:
        amdvi_inval_pages(s, cmd);
        break;
    case AMDVI_CMD_INVAL_IOTLB_PAGES:
        iommu_inval_iotlb(s, cmd);
        break;
    case AMDVI_CMD_INVAL_INTR_TABLE:
        amdvi_inval_inttable(s, cmd);
        break;
    case AMDVI_CMD_PREFETCH_AMDVI_PAGES:
        amdvi_prefetch_pages(s, cmd);
        break;
    case AMDVI_CMD_COMPLETE_PPR_REQUEST:
        amdvi_complete_ppr(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_ALL:
        amdvi_inval_all(s, cmd);
        break;
    default:
        trace_amdvi_unhandled_command(extract64(cmd[0], 60, 4));
        /* log illegal command */
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
}
static void amdvi_cmdbuf_run(AMDVIState *s)
{
    if (!s->cmdbuf_enabled) {
        trace_amdvi_command_error(amdvi_readq(s, AMDVI_MMIO_CONTROL));
        return;
    }

    /* check if there is work to do. */
    while (s->cmdbuf_head != s->cmdbuf_tail) {
        trace_amdvi_command_exec(s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
        amdvi_cmdbuf_exec(s);
        s->cmdbuf_head += AMDVI_COMMAND_SIZE;
        amdvi_writeq_raw(s, s->cmdbuf_head, AMDVI_MMIO_COMMAND_HEAD);

        /* wrap head pointer */
        if (s->cmdbuf_head >= s->cmdbuf_len * AMDVI_COMMAND_SIZE) {
            s->cmdbuf_head = 0;
        }
    }
}
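
/*
 * The command buffer is a ring of AMDVI_COMMAND_SIZE entries in guest
 * memory: the guest advances the tail via MMIO, and the device executes
 * entries until the head catches up with the tail, mirroring the new head
 * back into the COMMAND_HEAD register and wrapping after cmdbuf_len entries.
 */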
static void amdvi_mmio_trace(hwaddr addr, unsigned size)
{
    uint8_t index = (addr & ~0x2000) / 8;

    if ((addr & 0x2000)) {
        /* high table */
        index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index;
        trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
    } else {
        index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index;
        trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
    }
}

static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    AMDVIState *s = opaque;

    uint64_t val = -1;
    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_read_invalid(AMDVI_MMIO_SIZE, addr, size);
        return (uint64_t)-1;
    }

    if (size == 2) {
        val = amdvi_readw(s, addr);
    } else if (size == 4) {
        val = amdvi_readl(s, addr);
    } else if (size == 8) {
        val = amdvi_readq(s, addr);
    }
    amdvi_mmio_trace(addr, size);

    return val;
}
static void amdvi_handle_control_write(AMDVIState *s)
{
    unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL);
    s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN);

    s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN);
    s->evtlog_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_EVENTLOGEN);

    s->evtlog_intr = !!(control & AMDVI_MMIO_CONTROL_EVENTINTEN);
    s->completion_wait_intr = !!(control & AMDVI_MMIO_CONTROL_COMWAITINTEN);
    s->cmdbuf_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_CMDBUFLEN);

    /* update the flags depending on the control register */
    if (s->cmdbuf_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_CMDBUF_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_CMDBUF_RUN);
    }
    if (s->evtlog_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_EVT_RUN);
    }

    trace_amdvi_control_status(control);
    amdvi_cmdbuf_run(s);
}
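
/*
 * Event logging and command buffer processing are only treated as enabled
 * while the IOMMU itself is enabled; the RUN bits in the status register
 * are updated above to mirror the control register settings.
 */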
static inline void amdvi_handle_devtab_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_DEVICE_TABLE);
    s->devtab = (val & AMDVI_MMIO_DEVTAB_BASE_MASK);

    /* set device table length */
    s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1 *
                    (AMDVI_MMIO_DEVTAB_SIZE_UNIT /
                     AMDVI_MMIO_DEVTAB_ENTRY_SIZE));
}
static inline void amdvi_handle_cmdhead_write(AMDVIState *s)
{
    s->cmdbuf_head = amdvi_readq(s, AMDVI_MMIO_COMMAND_HEAD)
                     & AMDVI_MMIO_CMDBUF_HEAD_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_cmdbase_write(AMDVIState *s)
{
    s->cmdbuf = amdvi_readq(s, AMDVI_MMIO_COMMAND_BASE)
                & AMDVI_MMIO_CMDBUF_BASE_MASK;
    s->cmdbuf_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_CMDBUF_SIZE_BYTE)
                    & AMDVI_MMIO_CMDBUF_SIZE_MASK);
    s->cmdbuf_head = s->cmdbuf_tail = 0;
}

static inline void amdvi_handle_cmdtail_write(AMDVIState *s)
{
    s->cmdbuf_tail = amdvi_readq(s, AMDVI_MMIO_COMMAND_TAIL)
                     & AMDVI_MMIO_CMDBUF_TAIL_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_excllim_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EXCL_LIMIT);
    s->excl_limit = (val & AMDVI_MMIO_EXCL_LIMIT_MASK) |
                    AMDVI_MMIO_EXCL_LIMIT_LOW;
}

static inline void amdvi_handle_evtbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE);
    s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK;
    s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE)
                    & AMDVI_MMIO_EVTLOG_SIZE_MASK);
}

static inline void amdvi_handle_evttail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_TAIL);
    s->evtlog_tail = val & AMDVI_MMIO_EVTLOG_TAIL_MASK;
}

static inline void amdvi_handle_evthead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_HEAD);
    s->evtlog_head = val & AMDVI_MMIO_EVTLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_BASE);
    s->ppr_log = val & AMDVI_MMIO_PPRLOG_BASE_MASK;
    s->pprlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_PPRLOG_SIZE_BYTE)
                    & AMDVI_MMIO_PPRLOG_SIZE_MASK);
}

static inline void amdvi_handle_pprhead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_HEAD);
    s->pprlog_head = val & AMDVI_MMIO_PPRLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprtail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_TAIL);
    s->pprlog_tail = val & AMDVI_MMIO_PPRLOG_TAIL_MASK;
}
/* FIXME: something might go wrong if System Software writes in chunks
 * of one byte; Linux writes in chunks of 4 bytes, so this currently
 * works correctly with Linux but will definitely be broken if software
 * reads/writes 8 bytes
 */
static void amdvi_mmio_reg_write(AMDVIState *s, unsigned size, uint64_t val,
                                 hwaddr addr)
{
    if (size == 2) {
        amdvi_writew(s, addr, val);
    } else if (size == 4) {
        amdvi_writel(s, addr, val);
    } else if (size == 8) {
        amdvi_writeq(s, addr, val);
    }
}
static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                             unsigned size)
{
    AMDVIState *s = opaque;
    unsigned long offset = addr & 0x07;

    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_write("error: addr outside region: max ",
                (uint64_t)AMDVI_MMIO_SIZE, size, val, offset);
        return;
    }

    amdvi_mmio_trace(addr, size);
    switch (addr & ~0x07) {
    case AMDVI_MMIO_CONTROL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_control_write(s);
        break;
    case AMDVI_MMIO_DEVICE_TABLE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* set device table address
         * This also suffers from the inability to tell whether software
         * is done writing
         */
        if (offset || (size == 8)) {
            amdvi_handle_devtab_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdhead_write(s);
        break;
    case AMDVI_MMIO_COMMAND_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* FIXME - make sure System Software has finished writing, in case
         * it writes in chunks of less than 8 bytes, in a robust way. For
         * now, this hack works for the Linux driver.
         */
        if (offset || (size == 8)) {
            amdvi_handle_cmdbase_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdtail_write(s);
        break;
    case AMDVI_MMIO_EVENT_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evtbase_write(s);
        break;
    case AMDVI_MMIO_EVENT_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evthead_write(s);
        break;
    case AMDVI_MMIO_EVENT_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evttail_write(s);
        break;
    case AMDVI_MMIO_EXCL_LIMIT:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_excllim_write(s);
        break;
    /* PPR log base - unused for now */
    case AMDVI_MMIO_PPR_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprbase_write(s);
        break;
    /* PPR log head - also unused for now */
    case AMDVI_MMIO_PPR_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprhead_write(s);
        break;
    /* PPR log tail - unused for now */
    case AMDVI_MMIO_PPR_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprtail_write(s);
        break;
    }
}
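
/*
 * For the 64-bit base registers (device table and command buffer base),
 * the derived state is only recomputed once the high half has been
 * written (offset != 0 or an 8-byte access), matching the FIXME above
 * about partial writes.
 */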
static inline uint64_t amdvi_get_perms(uint64_t entry)
{
    return (entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE)) >>
           AMDVI_DEV_PERM_SHIFT;
}

/* a valid entry should have V = 1 and reserved bits honoured */
static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid,
                               uint64_t *dte)
{
    if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED)
        || (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED)
        || (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
        amdvi_log_illegaldevtab_error(s, devid,
                                      s->devtab +
                                      devid * AMDVI_DEVTAB_ENTRY_SIZE, 0);
        return false;
    }

    return dte[0] & AMDVI_DEV_VALID;
}

/* get a device table entry given the devid */
static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry)
{
    uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE;

    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
        AMDVI_DEVTAB_ENTRY_SIZE)) {
        trace_amdvi_dte_get_fail(s->devtab, offset);
        /* log error accessing dte */
        amdvi_log_devtab_error(s, devid, s->devtab + offset, 0);
        return false;
    }

    *entry = le64_to_cpu(*entry);
    if (!amdvi_validate_dte(s, devid, entry)) {
        trace_amdvi_invalid_dte(entry[0]);
        return false;
    }

    return true;
}
/* get pte translation mode */
static inline uint8_t get_pte_translation_mode(uint64_t pte)
{
    return (pte >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK;
}

static inline uint64_t pte_override_page_mask(uint64_t pte)
{
    uint8_t page_mask = 12;
    uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) ^ AMDVI_DEV_PT_ROOT_MASK;
    /* find the first zero bit */
    while (addr & 1) {
        page_mask++;
        addr = addr >> 1;
    }

    return ~((1ULL << page_mask) - 1);
}

static inline uint64_t pte_get_page_mask(uint64_t oldlevel)
{
    return ~((1UL << ((oldlevel * 9) + 3)) - 1);
}

static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr,
                                           uint16_t devid)
{
    uint64_t pte;

    if (dma_memory_read(&address_space_memory, pte_addr, &pte, sizeof(pte))) {
        trace_amdvi_get_pte_hwerror(pte_addr);
        amdvi_log_pagetab_error(s, devid, pte_addr, 0);
        pte = 0;
        return pte;
    }

    pte = le64_to_cpu(pte);
    return pte;
}
static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte,
                            IOMMUTLBEntry *ret, unsigned perms,
                            hwaddr addr)
{
    unsigned level, present, pte_perms, oldlevel;
    uint64_t pte = dte[0], pte_addr, page_mask;

    /* make sure the DTE has TV = 1 */
    if (pte & AMDVI_DEV_TRANSLATION_VALID) {
        level = get_pte_translation_mode(pte);
        if (level >= 7) {
            trace_amdvi_mode_invalid(level, addr);
            return;
        }
        if (level == 0) {
            goto no_remap;
        }

        /* we are at the leaf page table or page table encodes a huge page */
        while (level > 0) {
            pte_perms = amdvi_get_perms(pte);
            present = pte & 1;
            if (!present || perms != (perms & pte_perms)) {
                amdvi_page_fault(as->iommu_state, as->devfn, addr, perms);
                trace_amdvi_page_fault(addr);
                return;
            }

            /* go to the next lower level */
            pte_addr = pte & AMDVI_DEV_PT_ROOT_MASK;
            /* add offset and load pte */
            pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
            pte = amdvi_get_pte_entry(as->iommu_state, pte_addr, as->devfn);
            if (!pte) {
                return;
            }
            oldlevel = level;
            level = get_pte_translation_mode(pte);
            if (level == 0x7) {
                break;
            }
        }

        if (level == 0x7) {
            page_mask = pte_override_page_mask(pte);
        } else {
            page_mask = pte_get_page_mask(oldlevel);
        }

        /* get access permissions from pte */
        ret->iova = addr & page_mask;
        ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & page_mask;
        ret->addr_mask = ~page_mask;
        ret->perm = amdvi_get_perms(pte);
        return;
    }
no_remap:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = amdvi_get_perms(pte);
}
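
/*
 * The page walk starts at the mode (level) encoded in the DTE: each level
 * resolves 9 bits of the IOVA (hence the "3 + 9 * level" shift into an
 * array of 8-byte PTEs), and a next-level value of 7 marks a PTE whose
 * page size is derived from the low set bits of its address field
 * (pte_override_page_mask()) rather than from the level that reached it.
 */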
static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr,
                               bool is_write, IOMMUTLBEntry *ret)
{
    AMDVIState *s = as->iommu_state;
    uint16_t devid = PCI_BUILD_BDF(as->bus_num, as->devfn);
    AMDVIIOTLBEntry *iotlb_entry = amdvi_iotlb_lookup(s, addr, devid);
    uint64_t entry[4];

    if (iotlb_entry) {
        trace_amdvi_iotlb_hit(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
        ret->iova = addr & ~iotlb_entry->page_mask;
        ret->translated_addr = iotlb_entry->translated_addr;
        ret->addr_mask = iotlb_entry->page_mask;
        ret->perm = iotlb_entry->perms;
        return;
    }

    /* devices with V = 0 are not translated */
    if (!amdvi_get_dte(s, devid, entry)) {
        goto out;
    }

    amdvi_page_walk(as, entry, ret,
                    is_write ? AMDVI_PERM_WRITE : AMDVI_PERM_READ, addr);

    amdvi_update_iotlb(s, devid, addr, *ret,
                       entry[1] & AMDVI_DEV_DOMID_ID_MASK);
    return;

out:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = IOMMU_RW;
}
static inline bool amdvi_is_interrupt_addr(hwaddr addr)
{
    return addr >= AMDVI_INT_ADDR_FIRST && addr <= AMDVI_INT_ADDR_LAST;
}

static IOMMUTLBEntry amdvi_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                     IOMMUAccessFlags flag)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
    AMDVIState *s = as->iommu_state;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE
    };

    if (!s->enabled) {
        /* AMDVI disabled - corresponds to iommu=off not
         * failure to provide any parameter
         */
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_RW;
        return ret;
    } else if (amdvi_is_interrupt_addr(addr)) {
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_WO;
        return ret;
    }

    amdvi_do_translate(as, addr, flag & IOMMU_WO, &ret);
    trace_amdvi_translation_result(as->bus_num, PCI_SLOT(as->devfn),
            PCI_FUNC(as->devfn), addr, ret.translated_addr);
    return ret;
}
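
/*
 * With the IOMMU disabled, translation degenerates to an identity map
 * with full RW permission; addresses inside the interrupt window
 * (AMDVI_INT_ADDR_FIRST..AMDVI_INT_ADDR_LAST) are likewise passed through
 * untranslated but write-only, presumably because devices only ever write
 * MSI messages to that range.
 */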
static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    AMDVIState *s = opaque;
    AMDVIAddressSpace **iommu_as;
    int bus_num = pci_bus_num(bus);

    iommu_as = s->address_spaces[bus_num];

    /* allocate memory during the first run */
    if (!iommu_as) {
        iommu_as = g_malloc0(sizeof(AMDVIAddressSpace *) * PCI_DEVFN_MAX);
        s->address_spaces[bus_num] = iommu_as;
    }

    /* set up AMD-Vi region */
    if (!iommu_as[devfn]) {
        iommu_as[devfn] = g_malloc0(sizeof(AMDVIAddressSpace));
        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
        iommu_as[devfn]->devfn = (uint8_t)devfn;
        iommu_as[devfn]->iommu_state = s;

        memory_region_init_iommu(&iommu_as[devfn]->iommu,
                                 sizeof(iommu_as[devfn]->iommu),
                                 TYPE_AMD_IOMMU_MEMORY_REGION,
                                 OBJECT(s),
                                 "amd-iommu", UINT64_MAX);
        address_space_init(&iommu_as[devfn]->as,
                           MEMORY_REGION(&iommu_as[devfn]->iommu),
                           "amd-iommu");
    }
    return &iommu_as[devfn]->as;
}
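
/*
 * Address spaces are created lazily, one per (bus, devfn): each device
 * gets its own IOMMU memory region so that its DMA is routed through
 * amdvi_translate() with the correct requester id.
 */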
static const MemoryRegionOps mmio_mem_ops = {
    .read = amdvi_mmio_read,
    .write = amdvi_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    }
};
static void amdvi_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                            IOMMUNotifierFlag old,
                                            IOMMUNotifierFlag new)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);

    if (new & IOMMU_NOTIFIER_MAP) {
        error_report("device %02x.%02x.%x requires iommu notifier which is not "
                     "currently supported", as->bus_num, PCI_SLOT(as->devfn),
                     PCI_FUNC(as->devfn));
        exit(1);
    }
}
static void amdvi_init(AMDVIState *s)
{
    amdvi_iotlb_reset(s);

    s->devtab_len = 0;
    s->cmdbuf_len = 0;
    s->cmdbuf_head = 0;
    s->cmdbuf_tail = 0;
    s->evtlog_head = 0;
    s->evtlog_tail = 0;
    s->excl_enabled = false;
    s->excl_allow = false;
    s->mmio_enabled = false;
    s->enabled = false;
    s->ats_enabled = false;
    s->cmdbuf_enabled = false;

    /* reset MMIO */
    memset(s->mmior, 0, AMDVI_MMIO_SIZE);
    amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES,
            0xffffffffffffffef, 0);
    amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67);

    /* reset device ident */
    pci_config_set_vendor_id(s->pci.dev.config, PCI_VENDOR_ID_AMD);
    pci_config_set_prog_interface(s->pci.dev.config, 00);
    pci_config_set_device_id(s->pci.dev.config, s->devid);
    pci_config_set_class(s->pci.dev.config, 0x0806);

    /* reset AMDVI specific capabilities, all r/o */
    pci_set_long(s->pci.dev.config + s->capab_offset, AMDVI_CAPAB_FEATURES);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_LOW,
                 s->mmio.addr & ~(0xffff0000));
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH,
                (s->mmio.addr & ~(0xffff)) >> 16);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_RANGE,
                 0xff000000);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, 0);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC,
            AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR);
}

static void amdvi_reset(DeviceState *dev)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);

    msi_reset(&s->pci.dev);
    amdvi_init(s);
}
static void amdvi_realize(DeviceState *dev, Error **err)
{
    int ret = 0;
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
    MachineState *ms = MACHINE(qdev_get_machine());
    PCMachineState *pcms = PC_MACHINE(ms);
    PCIBus *bus = pcms->bus;

    s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
                                     amdvi_uint64_equal, g_free, g_free);

    /* This device should take care of IOMMU PCI properties */
    x86_iommu->type = TYPE_AMD;
    qdev_set_parent_bus(DEVICE(&s->pci), &bus->qbus);
    object_property_set_bool(OBJECT(&s->pci), true, "realized", err);
    ret = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0,
                             AMDVI_CAPAB_SIZE, err);
    if (ret < 0) {
        return;
    }
    s->capab_offset = ret;

    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0,
                             AMDVI_CAPAB_REG_SIZE, err);
    if (ret < 0) {
        return;
    }
    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0,
                             AMDVI_CAPAB_REG_SIZE, err);
    if (ret < 0) {
        return;
    }

    /* set up MMIO */
    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio",
                          AMDVI_MMIO_SIZE);

    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR);
    pci_setup_iommu(bus, amdvi_host_dma_iommu, s);
    s->devid = object_property_get_int(OBJECT(&s->pci), "addr", err);
    msi_init(&s->pci.dev, 0, 1, true, false, err);
    amdvi_init(s);
}
static const VMStateDescription vmstate_amdvi = {
    .name = "amd-iommu",
    .unmigratable = 1
};

static void amdvi_instance_init(Object *klass)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(klass);

    object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI);
}

static void amdvi_class_init(ObjectClass *klass, void* data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *dc_class = X86_IOMMU_CLASS(klass);

    dc->reset = amdvi_reset;
    dc->vmsd = &vmstate_amdvi;
    dc->hotpluggable = false;
    dc_class->realize = amdvi_realize;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
}

static const TypeInfo amdvi = {
    .name = TYPE_AMD_IOMMU_DEVICE,
    .parent = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(AMDVIState),
    .instance_init = amdvi_instance_init,
    .class_init = amdvi_class_init
};

static const TypeInfo amdviPCI = {
    .name = "AMDVI-PCI",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(AMDVIPCIState),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void amdvi_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = amdvi_translate;
    imrc->notify_flag_changed = amdvi_iommu_notify_flag_changed;
}

static const TypeInfo amdvi_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_AMD_IOMMU_MEMORY_REGION,
    .class_init = amdvi_iommu_memory_region_class_init,
};

static void amdviPCI_register_types(void)
{
    type_register_static(&amdviPCI);
    type_register_static(&amdvi);
    type_register_static(&amdvi_iommu_memory_region_info);
}

type_init(amdviPCI_register_types);