qemu/xen: Add 64-bit big BAR support on qemu
[qemu/ar7.git] / hw / xen_pt.c
blob: 838bcea4d6760e2c4307505d25bcf7866c7a6f49
/*
 * Copyright (c) 2007, Neocleus Corporation.
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Alex Novik <alex@neocleus.com>
 * Allen Kay <allen.m.kay@intel.com>
 * Guy Zana <guy@neocleus.com>
 *
 * This file implements direct PCI assignment to a HVM guest
 *
 * Interrupt Disable policy:
 *
 * INTx interrupt:
 *   Initialize(register_real_device)
 *     Map INTx(xc_physdev_map_pirq):
 *       <fail>
 *         - Set real Interrupt Disable bit to '1'.
 *         - Set machine_irq and assigned_device->machine_irq to '0'.
 *         * Don't bind INTx.
 *
 *     Bind INTx(xc_domain_bind_pt_pci_irq):
 *       <fail>
 *         - Set real Interrupt Disable bit to '1'.
 *         - Unmap INTx.
 *         - Decrement xen_pt_mapped_machine_irq[machine_irq]
 *         - Set assigned_device->machine_irq to '0'.
 *
 *   Write to Interrupt Disable bit by guest software(xen_pt_cmd_reg_write)
 *     Write '0'
 *       - Set real bit to '0' if assigned_device->machine_irq isn't '0'.
 *
 *     Write '1'
 *       - Set real bit to '1'.
 *
 * MSI interrupt:
 *   Initialize MSI register(xen_pt_msi_setup, xen_pt_msi_update)
 *   Bind MSI(xc_domain_update_msi_irq)
 *     <fail>
 *       - Unmap MSI.
 *       - Set dev->msi->pirq to '-1'.
 *
 * MSI-X interrupt:
 *   Initialize MSI-X register(xen_pt_msix_update_one)
 *   Bind MSI-X(xc_domain_update_msi_irq)
 *     <fail>
 *       - Unmap MSI-X.
 *       - Set entry->pirq to '-1'.
 */
55 #include <sys/ioctl.h>
57 #include "pci.h"
58 #include "xen.h"
59 #include "xen_backend.h"
60 #include "xen_pt.h"
61 #include "range.h"
/* Per-machine-IRQ reference counts for IRQs mapped into the guest;
 * indexed by machine IRQ number, bumped on map and dropped on unmap. */
#define XEN_PT_NR_IRQS (256)
static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0};
66 void xen_pt_log(const PCIDevice *d, const char *f, ...)
68 va_list ap;
70 va_start(ap, f);
71 if (d) {
72 fprintf(stderr, "[%02x:%02x.%d] ", pci_bus_num(d->bus),
73 PCI_SLOT(d->devfn), PCI_FUNC(d->devfn));
75 vfprintf(stderr, f, ap);
76 va_end(ap);
79 /* Config Space */
81 static int xen_pt_pci_config_access_check(PCIDevice *d, uint32_t addr, int len)
83 /* check offset range */
84 if (addr >= 0xFF) {
85 XEN_PT_ERR(d, "Failed to access register with offset exceeding 0xFF. "
86 "(addr: 0x%02x, len: %d)\n", addr, len);
87 return -1;
90 /* check read size */
91 if ((len != 1) && (len != 2) && (len != 4)) {
92 XEN_PT_ERR(d, "Failed to access register with invalid access length. "
93 "(addr: 0x%02x, len: %d)\n", addr, len);
94 return -1;
97 /* check offset alignment */
98 if (addr & (len - 1)) {
99 XEN_PT_ERR(d, "Failed to access register with invalid access size "
100 "alignment. (addr: 0x%02x, len: %d)\n", addr, len);
101 return -1;
104 return 0;
107 int xen_pt_bar_offset_to_index(uint32_t offset)
109 int index = 0;
111 /* check Exp ROM BAR */
112 if (offset == PCI_ROM_ADDRESS) {
113 return PCI_ROM_SLOT;
116 /* calculate BAR index */
117 index = (offset - PCI_BASE_ADDRESS_0) >> 2;
118 if (index >= PCI_NUM_REGIONS) {
119 return -1;
122 return index;
125 static uint32_t xen_pt_pci_read_config(PCIDevice *d, uint32_t addr, int len)
127 XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d);
128 uint32_t val = 0;
129 XenPTRegGroup *reg_grp_entry = NULL;
130 XenPTReg *reg_entry = NULL;
131 int rc = 0;
132 int emul_len = 0;
133 uint32_t find_addr = addr;
135 if (xen_pt_pci_config_access_check(d, addr, len)) {
136 goto exit;
139 /* find register group entry */
140 reg_grp_entry = xen_pt_find_reg_grp(s, addr);
141 if (reg_grp_entry) {
142 /* check 0-Hardwired register group */
143 if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
144 /* no need to emulate, just return 0 */
145 val = 0;
146 goto exit;
150 /* read I/O device register value */
151 rc = xen_host_pci_get_block(&s->real_device, addr, (uint8_t *)&val, len);
152 if (rc < 0) {
153 XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
154 memset(&val, 0xff, len);
157 /* just return the I/O device register value for
158 * passthrough type register group */
159 if (reg_grp_entry == NULL) {
160 goto exit;
163 /* adjust the read value to appropriate CFC-CFF window */
164 val <<= (addr & 3) << 3;
165 emul_len = len;
167 /* loop around the guest requested size */
168 while (emul_len > 0) {
169 /* find register entry to be emulated */
170 reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
171 if (reg_entry) {
172 XenPTRegInfo *reg = reg_entry->reg;
173 uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
174 uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
175 uint8_t *ptr_val = NULL;
177 valid_mask <<= (find_addr - real_offset) << 3;
178 ptr_val = (uint8_t *)&val + (real_offset & 3);
180 /* do emulation based on register size */
181 switch (reg->size) {
182 case 1:
183 if (reg->u.b.read) {
184 rc = reg->u.b.read(s, reg_entry, ptr_val, valid_mask);
186 break;
187 case 2:
188 if (reg->u.w.read) {
189 rc = reg->u.w.read(s, reg_entry,
190 (uint16_t *)ptr_val, valid_mask);
192 break;
193 case 4:
194 if (reg->u.dw.read) {
195 rc = reg->u.dw.read(s, reg_entry,
196 (uint32_t *)ptr_val, valid_mask);
198 break;
201 if (rc < 0) {
202 xen_shutdown_fatal_error("Internal error: Invalid read "
203 "emulation. (%s, rc: %d)\n",
204 __func__, rc);
205 return 0;
208 /* calculate next address to find */
209 emul_len -= reg->size;
210 if (emul_len > 0) {
211 find_addr = real_offset + reg->size;
213 } else {
214 /* nothing to do with passthrough type register,
215 * continue to find next byte */
216 emul_len--;
217 find_addr++;
221 /* need to shift back before returning them to pci bus emulator */
222 val >>= ((addr & 3) << 3);
224 exit:
225 XEN_PT_LOG_CONFIG(d, addr, val, len);
226 return val;
229 static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
230 uint32_t val, int len)
232 XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d);
233 int index = 0;
234 XenPTRegGroup *reg_grp_entry = NULL;
235 int rc = 0;
236 uint32_t read_val = 0;
237 int emul_len = 0;
238 XenPTReg *reg_entry = NULL;
239 uint32_t find_addr = addr;
240 XenPTRegInfo *reg = NULL;
242 if (xen_pt_pci_config_access_check(d, addr, len)) {
243 return;
246 XEN_PT_LOG_CONFIG(d, addr, val, len);
248 /* check unused BAR register */
249 index = xen_pt_bar_offset_to_index(addr);
250 if ((index >= 0) && (val > 0 && val < XEN_PT_BAR_ALLF) &&
251 (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED)) {
252 XEN_PT_WARN(d, "Guest attempt to set address to unused Base Address "
253 "Register. (addr: 0x%02x, len: %d)\n", addr, len);
256 /* find register group entry */
257 reg_grp_entry = xen_pt_find_reg_grp(s, addr);
258 if (reg_grp_entry) {
259 /* check 0-Hardwired register group */
260 if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
261 /* ignore silently */
262 XEN_PT_WARN(d, "Access to 0-Hardwired register. "
263 "(addr: 0x%02x, len: %d)\n", addr, len);
264 return;
268 rc = xen_host_pci_get_block(&s->real_device, addr,
269 (uint8_t *)&read_val, len);
270 if (rc < 0) {
271 XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
272 memset(&read_val, 0xff, len);
275 /* pass directly to the real device for passthrough type register group */
276 if (reg_grp_entry == NULL) {
277 goto out;
280 memory_region_transaction_begin();
281 pci_default_write_config(d, addr, val, len);
283 /* adjust the read and write value to appropriate CFC-CFF window */
284 read_val <<= (addr & 3) << 3;
285 val <<= (addr & 3) << 3;
286 emul_len = len;
288 /* loop around the guest requested size */
289 while (emul_len > 0) {
290 /* find register entry to be emulated */
291 reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
292 if (reg_entry) {
293 reg = reg_entry->reg;
294 uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
295 uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
296 uint8_t *ptr_val = NULL;
298 valid_mask <<= (find_addr - real_offset) << 3;
299 ptr_val = (uint8_t *)&val + (real_offset & 3);
301 /* do emulation based on register size */
302 switch (reg->size) {
303 case 1:
304 if (reg->u.b.write) {
305 rc = reg->u.b.write(s, reg_entry, ptr_val,
306 read_val >> ((real_offset & 3) << 3),
307 valid_mask);
309 break;
310 case 2:
311 if (reg->u.w.write) {
312 rc = reg->u.w.write(s, reg_entry, (uint16_t *)ptr_val,
313 (read_val >> ((real_offset & 3) << 3)),
314 valid_mask);
316 break;
317 case 4:
318 if (reg->u.dw.write) {
319 rc = reg->u.dw.write(s, reg_entry, (uint32_t *)ptr_val,
320 (read_val >> ((real_offset & 3) << 3)),
321 valid_mask);
323 break;
326 if (rc < 0) {
327 xen_shutdown_fatal_error("Internal error: Invalid write"
328 " emulation. (%s, rc: %d)\n",
329 __func__, rc);
330 return;
333 /* calculate next address to find */
334 emul_len -= reg->size;
335 if (emul_len > 0) {
336 find_addr = real_offset + reg->size;
338 } else {
339 /* nothing to do with passthrough type register,
340 * continue to find next byte */
341 emul_len--;
342 find_addr++;
346 /* need to shift back before passing them to xen_host_pci_device */
347 val >>= (addr & 3) << 3;
349 memory_region_transaction_commit();
351 out:
352 if (!(reg && reg->no_wb)) {
353 /* unknown regs are passed through */
354 rc = xen_host_pci_set_block(&s->real_device, addr,
355 (uint8_t *)&val, len);
357 if (rc < 0) {
358 XEN_PT_ERR(d, "pci_write_block failed. return value: %d.\n", rc);
363 /* register regions */
365 static uint64_t xen_pt_bar_read(void *o, target_phys_addr_t addr,
366 unsigned size)
368 PCIDevice *d = o;
369 /* if this function is called, that probably means that there is a
370 * misconfiguration of the IOMMU. */
371 XEN_PT_ERR(d, "Should not read BAR through QEMU. @0x"TARGET_FMT_plx"\n",
372 addr);
373 return 0;
375 static void xen_pt_bar_write(void *o, target_phys_addr_t addr, uint64_t val,
376 unsigned size)
378 PCIDevice *d = o;
379 /* Same comment as xen_pt_bar_read function */
380 XEN_PT_ERR(d, "Should not write BAR through QEMU. @0x"TARGET_FMT_plx"\n",
381 addr);
384 static const MemoryRegionOps ops = {
385 .endianness = DEVICE_NATIVE_ENDIAN,
386 .read = xen_pt_bar_read,
387 .write = xen_pt_bar_write,
390 static int xen_pt_register_regions(XenPCIPassthroughState *s)
392 int i = 0;
393 XenHostPCIDevice *d = &s->real_device;
395 /* Register PIO/MMIO BARs */
396 for (i = 0; i < PCI_ROM_SLOT; i++) {
397 XenHostPCIIORegion *r = &d->io_regions[i];
398 uint8_t type;
400 if (r->base_addr == 0 || r->size == 0) {
401 continue;
404 s->bases[i].access.u = r->base_addr;
406 if (r->type & XEN_HOST_PCI_REGION_TYPE_IO) {
407 type = PCI_BASE_ADDRESS_SPACE_IO;
408 } else {
409 type = PCI_BASE_ADDRESS_SPACE_MEMORY;
410 if (r->type & XEN_HOST_PCI_REGION_TYPE_PREFETCH) {
411 type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
413 if (r->type & XEN_HOST_PCI_REGION_TYPE_MEM_64) {
414 type |= PCI_BASE_ADDRESS_MEM_TYPE_64;
418 memory_region_init_io(&s->bar[i], &ops, &s->dev,
419 "xen-pci-pt-bar", r->size);
420 pci_register_bar(&s->dev, i, type, &s->bar[i]);
422 XEN_PT_LOG(&s->dev, "IO region %i registered (size=0x%lx"PRIx64
423 " base_addr=0x%lx"PRIx64" type: %#x)\n",
424 i, r->size, r->base_addr, type);
427 /* Register expansion ROM address */
428 if (d->rom.base_addr && d->rom.size) {
429 uint32_t bar_data = 0;
431 /* Re-set BAR reported by OS, otherwise ROM can't be read. */
432 if (xen_host_pci_get_long(d, PCI_ROM_ADDRESS, &bar_data)) {
433 return 0;
435 if ((bar_data & PCI_ROM_ADDRESS_MASK) == 0) {
436 bar_data |= d->rom.base_addr & PCI_ROM_ADDRESS_MASK;
437 xen_host_pci_set_long(d, PCI_ROM_ADDRESS, bar_data);
440 s->bases[PCI_ROM_SLOT].access.maddr = d->rom.base_addr;
442 memory_region_init_rom_device(&s->rom, NULL, NULL,
443 "xen-pci-pt-rom", d->rom.size);
444 pci_register_bar(&s->dev, PCI_ROM_SLOT, PCI_BASE_ADDRESS_MEM_PREFETCH,
445 &s->rom);
447 XEN_PT_LOG(&s->dev, "Expansion ROM registered (size=0x%08"PRIx64
448 " base_addr=0x%08"PRIx64")\n",
449 d->rom.size, d->rom.base_addr);
452 return 0;
455 static void xen_pt_unregister_regions(XenPCIPassthroughState *s)
457 XenHostPCIDevice *d = &s->real_device;
458 int i;
460 for (i = 0; i < PCI_NUM_REGIONS - 1; i++) {
461 XenHostPCIIORegion *r = &d->io_regions[i];
463 if (r->base_addr == 0 || r->size == 0) {
464 continue;
467 memory_region_destroy(&s->bar[i]);
469 if (d->rom.base_addr && d->rom.size) {
470 memory_region_destroy(&s->rom);
474 /* region mapping */
476 static int xen_pt_bar_from_region(XenPCIPassthroughState *s, MemoryRegion *mr)
478 int i = 0;
480 for (i = 0; i < PCI_NUM_REGIONS - 1; i++) {
481 if (mr == &s->bar[i]) {
482 return i;
485 if (mr == &s->rom) {
486 return PCI_ROM_SLOT;
488 return -1;
492 * This function checks if an io_region overlaps an io_region from another
493 * device. The io_region to check is provided with (addr, size and type)
494 * A callback can be provided and will be called for every region that is
495 * overlapped.
496 * The return value indicates if the region is overlappsed */
497 struct CheckBarArgs {
498 XenPCIPassthroughState *s;
499 pcibus_t addr;
500 pcibus_t size;
501 uint8_t type;
502 bool rc;
504 static void xen_pt_check_bar_overlap(PCIBus *bus, PCIDevice *d, void *opaque)
506 struct CheckBarArgs *arg = opaque;
507 XenPCIPassthroughState *s = arg->s;
508 uint8_t type = arg->type;
509 int i;
511 if (d->devfn == s->dev.devfn) {
512 return;
515 /* xxx: This ignores bridges. */
516 for (i = 0; i < PCI_NUM_REGIONS; i++) {
517 const PCIIORegion *r = &d->io_regions[i];
519 if (!r->size) {
520 continue;
522 if ((type & PCI_BASE_ADDRESS_SPACE_IO)
523 != (r->type & PCI_BASE_ADDRESS_SPACE_IO)) {
524 continue;
527 if (ranges_overlap(arg->addr, arg->size, r->addr, r->size)) {
528 XEN_PT_WARN(&s->dev,
529 "Overlapped to device [%02x:%02x.%d] Region: %i"
530 " (addr: %#"FMT_PCIBUS", len: %#"FMT_PCIBUS")\n",
531 pci_bus_num(bus), PCI_SLOT(d->devfn),
532 PCI_FUNC(d->devfn), i, r->addr, r->size);
533 arg->rc = true;
538 static void xen_pt_region_update(XenPCIPassthroughState *s,
539 MemoryRegionSection *sec, bool adding)
541 PCIDevice *d = &s->dev;
542 MemoryRegion *mr = sec->mr;
543 int bar = -1;
544 int rc;
545 int op = adding ? DPCI_ADD_MAPPING : DPCI_REMOVE_MAPPING;
546 struct CheckBarArgs args = {
547 .s = s,
548 .addr = sec->offset_within_address_space,
549 .size = sec->size,
550 .rc = false,
553 bar = xen_pt_bar_from_region(s, mr);
554 if (bar == -1 && (!s->msix || &s->msix->mmio != mr)) {
555 return;
558 if (s->msix && &s->msix->mmio == mr) {
559 if (adding) {
560 s->msix->mmio_base_addr = sec->offset_within_address_space;
561 rc = xen_pt_msix_update_remap(s, s->msix->bar_index);
563 return;
566 args.type = d->io_regions[bar].type;
567 pci_for_each_device(d->bus, pci_bus_num(d->bus),
568 xen_pt_check_bar_overlap, &args);
569 if (args.rc) {
570 XEN_PT_WARN(d, "Region: %d (addr: %#"FMT_PCIBUS
571 ", len: %#"FMT_PCIBUS") is overlapped.\n",
572 bar, sec->offset_within_address_space, sec->size);
575 if (d->io_regions[bar].type & PCI_BASE_ADDRESS_SPACE_IO) {
576 uint32_t guest_port = sec->offset_within_address_space;
577 uint32_t machine_port = s->bases[bar].access.pio_base;
578 uint32_t size = sec->size;
579 rc = xc_domain_ioport_mapping(xen_xc, xen_domid,
580 guest_port, machine_port, size,
581 op);
582 if (rc) {
583 XEN_PT_ERR(d, "%s ioport mapping failed! (rc: %i)\n",
584 adding ? "create new" : "remove old", rc);
586 } else {
587 pcibus_t guest_addr = sec->offset_within_address_space;
588 pcibus_t machine_addr = s->bases[bar].access.maddr
589 + sec->offset_within_region;
590 pcibus_t size = sec->size;
591 rc = xc_domain_memory_mapping(xen_xc, xen_domid,
592 XEN_PFN(guest_addr + XC_PAGE_SIZE - 1),
593 XEN_PFN(machine_addr + XC_PAGE_SIZE - 1),
594 XEN_PFN(size + XC_PAGE_SIZE - 1),
595 op);
596 if (rc) {
597 XEN_PT_ERR(d, "%s mem mapping failed! (rc: %i)\n",
598 adding ? "create new" : "remove old", rc);
603 static void xen_pt_begin(MemoryListener *l)
607 static void xen_pt_commit(MemoryListener *l)
611 static void xen_pt_region_add(MemoryListener *l, MemoryRegionSection *sec)
613 XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
614 memory_listener);
616 xen_pt_region_update(s, sec, true);
619 static void xen_pt_region_del(MemoryListener *l, MemoryRegionSection *sec)
621 XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
622 memory_listener);
624 xen_pt_region_update(s, sec, false);
627 static void xen_pt_region_nop(MemoryListener *l, MemoryRegionSection *s)
631 static void xen_pt_log_fns(MemoryListener *l, MemoryRegionSection *s)
635 static void xen_pt_log_global_fns(MemoryListener *l)
639 static void xen_pt_eventfd_fns(MemoryListener *l, MemoryRegionSection *s,
640 bool match_data, uint64_t data, EventNotifier *n)
644 static const MemoryListener xen_pt_memory_listener = {
645 .begin = xen_pt_begin,
646 .commit = xen_pt_commit,
647 .region_add = xen_pt_region_add,
648 .region_nop = xen_pt_region_nop,
649 .region_del = xen_pt_region_del,
650 .log_start = xen_pt_log_fns,
651 .log_stop = xen_pt_log_fns,
652 .log_sync = xen_pt_log_fns,
653 .log_global_start = xen_pt_log_global_fns,
654 .log_global_stop = xen_pt_log_global_fns,
655 .eventfd_add = xen_pt_eventfd_fns,
656 .eventfd_del = xen_pt_eventfd_fns,
657 .priority = 10,
660 /* init */
662 static int xen_pt_initfn(PCIDevice *d)
664 XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d);
665 int rc = 0;
666 uint8_t machine_irq = 0;
667 int pirq = XEN_PT_UNASSIGNED_PIRQ;
669 /* register real device */
670 XEN_PT_LOG(d, "Assigning real physical device %02x:%02x.%d"
671 " to devfn %#x\n",
672 s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function,
673 s->dev.devfn);
675 rc = xen_host_pci_device_get(&s->real_device,
676 s->hostaddr.domain, s->hostaddr.bus,
677 s->hostaddr.slot, s->hostaddr.function);
678 if (rc) {
679 XEN_PT_ERR(d, "Failed to \"open\" the real pci device. rc: %i\n", rc);
680 return -1;
683 s->is_virtfn = s->real_device.is_virtfn;
684 if (s->is_virtfn) {
685 XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n",
686 s->real_device.domain, bus, slot, func);
689 /* Initialize virtualized PCI configuration (Extended 256 Bytes) */
690 if (xen_host_pci_get_block(&s->real_device, 0, d->config,
691 PCI_CONFIG_SPACE_SIZE) == -1) {
692 xen_host_pci_device_put(&s->real_device);
693 return -1;
696 s->memory_listener = xen_pt_memory_listener;
698 /* Handle real device's MMIO/PIO BARs */
699 xen_pt_register_regions(s);
701 /* reinitialize each config register to be emulated */
702 if (xen_pt_config_init(s)) {
703 XEN_PT_ERR(d, "PCI Config space initialisation failed.\n");
704 xen_host_pci_device_put(&s->real_device);
705 return -1;
708 /* Bind interrupt */
709 if (!s->dev.config[PCI_INTERRUPT_PIN]) {
710 XEN_PT_LOG(d, "no pin interrupt\n");
711 goto out;
714 machine_irq = s->real_device.irq;
715 rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq);
717 if (rc < 0) {
718 XEN_PT_ERR(d, "Mapping machine irq %u to pirq %i failed, (rc: %d)\n",
719 machine_irq, pirq, rc);
721 /* Disable PCI intx assertion (turn on bit10 of devctl) */
722 xen_host_pci_set_word(&s->real_device,
723 PCI_COMMAND,
724 pci_get_word(s->dev.config + PCI_COMMAND)
725 | PCI_COMMAND_INTX_DISABLE);
726 machine_irq = 0;
727 s->machine_irq = 0;
728 } else {
729 machine_irq = pirq;
730 s->machine_irq = pirq;
731 xen_pt_mapped_machine_irq[machine_irq]++;
734 /* bind machine_irq to device */
735 if (machine_irq != 0) {
736 uint8_t e_intx = xen_pt_pci_intx(s);
738 rc = xc_domain_bind_pt_pci_irq(xen_xc, xen_domid, machine_irq,
739 pci_bus_num(d->bus),
740 PCI_SLOT(d->devfn),
741 e_intx);
742 if (rc < 0) {
743 XEN_PT_ERR(d, "Binding of interrupt %i failed! (rc: %d)\n",
744 e_intx, rc);
746 /* Disable PCI intx assertion (turn on bit10 of devctl) */
747 xen_host_pci_set_word(&s->real_device, PCI_COMMAND,
748 *(uint16_t *)(&s->dev.config[PCI_COMMAND])
749 | PCI_COMMAND_INTX_DISABLE);
750 xen_pt_mapped_machine_irq[machine_irq]--;
752 if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
753 if (xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq)) {
754 XEN_PT_ERR(d, "Unmapping of machine interrupt %i failed!"
755 " (rc: %d)\n", machine_irq, rc);
758 s->machine_irq = 0;
762 out:
763 memory_listener_register(&s->memory_listener, NULL);
764 XEN_PT_LOG(d, "Real physical device %02x:%02x.%d registered successfuly!\n",
765 bus, slot, func);
767 return 0;
770 static void xen_pt_unregister_device(PCIDevice *d)
772 XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d);
773 uint8_t machine_irq = s->machine_irq;
774 uint8_t intx = xen_pt_pci_intx(s);
775 int rc;
777 if (machine_irq) {
778 rc = xc_domain_unbind_pt_irq(xen_xc, xen_domid, machine_irq,
779 PT_IRQ_TYPE_PCI,
780 pci_bus_num(d->bus),
781 PCI_SLOT(s->dev.devfn),
782 intx,
783 0 /* isa_irq */);
784 if (rc < 0) {
785 XEN_PT_ERR(d, "unbinding of interrupt INT%c failed."
786 " (machine irq: %i, rc: %d)"
787 " But bravely continuing on..\n",
788 'a' + intx, machine_irq, rc);
792 if (s->msi) {
793 xen_pt_msi_disable(s);
795 if (s->msix) {
796 xen_pt_msix_disable(s);
799 if (machine_irq) {
800 xen_pt_mapped_machine_irq[machine_irq]--;
802 if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
803 rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq);
805 if (rc < 0) {
806 XEN_PT_ERR(d, "unmapping of interrupt %i failed. (rc: %d)"
807 " But bravely continuing on..\n",
808 machine_irq, rc);
813 /* delete all emulated config registers */
814 xen_pt_config_delete(s);
816 xen_pt_unregister_regions(s);
817 memory_listener_unregister(&s->memory_listener);
819 xen_host_pci_device_put(&s->real_device);
822 static Property xen_pci_passthrough_properties[] = {
823 DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr),
824 DEFINE_PROP_END_OF_LIST(),
827 static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
829 DeviceClass *dc = DEVICE_CLASS(klass);
830 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
832 k->init = xen_pt_initfn;
833 k->exit = xen_pt_unregister_device;
834 k->config_read = xen_pt_pci_read_config;
835 k->config_write = xen_pt_pci_write_config;
836 dc->desc = "Assign an host PCI device with Xen";
837 dc->props = xen_pci_passthrough_properties;
840 static TypeInfo xen_pci_passthrough_info = {
841 .name = "xen-pci-passthrough",
842 .parent = TYPE_PCI_DEVICE,
843 .instance_size = sizeof(XenPCIPassthroughState),
844 .class_init = xen_pci_passthrough_class_init,
847 static void xen_pci_passthrough_register_types(void)
849 type_register_static(&xen_pci_passthrough_info);
852 type_init(xen_pci_passthrough_register_types)