/*
 * s390 PCI BUS
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-vfio.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/msi.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"

#include "trace.h"

S390pciState *s390_get_phb(void)
{
    static S390pciState *phb;

    if (!phb) {
        phb = S390_PCI_HOST_BRIDGE(
            object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
        assert(phb != NULL);
    }

    return phb;
}

int pci_chsc_sei_nt2_get_event(void *res)
{
    ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res;
    PciCcdfAvail *accdf;
    PciCcdfErr *eccdf;
    int rc = 1;
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = QTAILQ_FIRST(&s->pending_sei);
    if (sei_cont) {
        QTAILQ_REMOVE(&s->pending_sei, sei_cont, link);
        nt2_res->nt = 2;
        nt2_res->cc = sei_cont->cc;
        nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res));
        switch (sei_cont->cc) {
        case 1: /* error event */
            eccdf = (PciCcdfErr *)nt2_res->ccdf;
            eccdf->fid = cpu_to_be32(sei_cont->fid);
            eccdf->fh = cpu_to_be32(sei_cont->fh);
            eccdf->e = cpu_to_be32(sei_cont->e);
            eccdf->faddr = cpu_to_be64(sei_cont->faddr);
            eccdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        case 2: /* availability event */
            accdf = (PciCcdfAvail *)nt2_res->ccdf;
            accdf->fid = cpu_to_be32(sei_cont->fid);
            accdf->fh = cpu_to_be32(sei_cont->fh);
            accdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        default:
            abort();
        }
        g_free(sei_cont);
        rc = 0;
    }

    return rc;
}

int pci_chsc_sei_nt2_have_event(void)
{
    S390pciState *s = s390_get_phb();

    return !QTAILQ_EMPTY(&s->pending_sei);
}

S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
                                               S390PCIBusDevice *pbdev)
{
    S390PCIBusDevice *ret = pbdev ? QTAILQ_NEXT(pbdev, link) :
        QTAILQ_FIRST(&s->zpci_devs);

    while (ret && ret->state == ZPCI_FS_RESERVED) {
        ret = QTAILQ_NEXT(ret, link);
    }

    return ret;
}

S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->fid == fid) {
            return pbdev;
        }
    }

    return NULL;
}

void s390_pci_sclp_configure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("configure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        pbdev->state = ZPCI_FS_DISABLED;
        rc = SCLP_RC_NORMAL_COMPLETION;
        break;
    default:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static void s390_pci_shutdown_notifier(Notifier *n, void *opaque)
{
    S390PCIBusDevice *pbdev = container_of(n, S390PCIBusDevice,
                                           shutdown_notifier);

    pci_device_reset(pbdev->pdev);
}

static void s390_pci_reset_cb(void *opaque)
{
    S390PCIBusDevice *pbdev = opaque;

    pci_device_reset(pbdev->pdev);
}

static void s390_pci_perform_unplug(S390PCIBusDevice *pbdev)
{
    HotplugHandler *hotplug_ctrl;

    if (pbdev->pft == ZPCI_PFT_ISM) {
        notifier_remove(&pbdev->shutdown_notifier);
        qemu_unregister_reset(s390_pci_reset_cb, pbdev);
    }

    /* Unplug the PCI device */
    if (pbdev->pdev) {
        DeviceState *pdev = DEVICE(pbdev->pdev);

        hotplug_ctrl = qdev_get_hotplug_handler(pdev);
        hotplug_handler_unplug(hotplug_ctrl, pdev, &error_abort);
        object_unparent(OBJECT(pdev));
    }

    /* Unplug the zPCI device */
    hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(pbdev));
    hotplug_handler_unplug(hotplug_ctrl, DEVICE(pbdev), &error_abort);
    object_unparent(OBJECT(pbdev));
}

void s390_pci_sclp_deconfigure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("deconfigure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
        break;
    default:
        if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
            /* Interpreted devices were using interrupt forwarding */
            s390_pci_kvm_aif_disable(pbdev);
        } else if (pbdev->summary_ind) {
            pci_dereg_irqs(pbdev);
        }
        if (pbdev->iommu->enabled) {
            pci_dereg_ioat(pbdev->iommu);
        }
        pbdev->state = ZPCI_FS_STANDBY;
        rc = SCLP_RC_NORMAL_COMPLETION;

        if (pbdev->unplug_requested) {
            s390_pci_perform_unplug(pbdev);
        }
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static S390PCIBusDevice *s390_pci_find_dev_by_uid(S390pciState *s, uint16_t uid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->uid == uid) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s,
                                              const char *target)
{
    S390PCIBusDevice *pbdev;

    if (!target) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (!strcmp(pbdev->target, target)) {
            return pbdev;
        }
    }

    return NULL;
}

static S390PCIBusDevice *s390_pci_find_dev_by_pci(S390pciState *s,
                                                  PCIDevice *pci_dev)
{
    S390PCIBusDevice *pbdev;

    if (!pci_dev) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->pdev == pci_dev) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx)
{
    return g_hash_table_lookup(s->zpci_table, &idx);
}

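/*
 * A function handle (fh) carries the device index in its low bits
 * (FH_MASK_INDEX): look the device up by index first, then verify that
 * the full handle matches.
 */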
S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh)
{
    uint32_t idx = FH_MASK_INDEX & fh;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_idx(s, idx);

    if (pbdev && pbdev->fh == fh) {
        return pbdev;
    }

    return NULL;
}

static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh,
                                    uint32_t fid, uint64_t faddr, uint32_t e)
{
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = g_new0(SeiContainer, 1);
    sei_cont->fh = fh;
    sei_cont->fid = fid;
    sei_cont->cc = cc;
    sei_cont->pec = pec;
    sei_cont->faddr = faddr;
    sei_cont->e = e;

    QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link);
    css_generate_css_crws(0);
}

static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh,
                                         uint32_t fid)
{
    s390_pci_generate_event(2, pec, fh, fid, 0, 0);
}

void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
                                   uint64_t faddr, uint32_t e)
{
    s390_pci_generate_event(1, pec, fh, fid, faddr, e);
}

static void s390_pci_set_irq(void *opaque, int irq, int level)
{
    /* nothing to do */
}

static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* nothing to do */
    return 0;
}

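/*
 * Helpers for parsing the guest's zPCI I/O address translation tables:
 * a region table entry yields a segment table origin, a segment table
 * entry yields a page table origin, and a page table entry maps the
 * final 4K page frame.
 */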
static uint64_t s390_pci_get_table_origin(uint64_t iota)
{
    return iota & ~ZPCI_IOTA_RTTO_FLAG;
}

static unsigned int calc_rtx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_sx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_px(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> TARGET_PAGE_BITS) & ZPCI_PT_MASK;
}

static uint64_t get_rt_sto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
               ? (entry & ZPCI_RTE_ADDR_MASK)
               : 0;
}

static uint64_t get_st_pto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
               ? (entry & ZPCI_STE_ADDR_MASK)
               : 0;
}

static bool rt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static bool pt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static bool entry_isprotected(uint64_t entry)
{
    return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}

/* ett is expected table type, -1 page table, 0 segment table, 1 region table */
static uint64_t get_table_index(uint64_t iova, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return calc_px(iova);
    case ZPCI_ETT_ST:
        return calc_sx(iova);
    case ZPCI_ETT_RT:
        return calc_rtx(iova);
    }

    return -1;
}

static bool entry_isvalid(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return pt_entry_isvalid(entry);
    case ZPCI_ETT_ST:
    case ZPCI_ETT_RT:
        return rt_entry_isvalid(entry);
    }

    return false;
}

/* Return true if address translation is done */
static bool translate_iscomplete(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case 0:
        return (entry & ZPCI_TABLE_FC) ? true : false;
    case 1:
        return false;
    }

    return true;
}

static uint64_t get_frame_size(int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return 1ULL << 12;
    case ZPCI_ETT_ST:
        return 1ULL << 20;
    case ZPCI_ETT_RT:
        return 1ULL << 31;
    }

    return 0;
}

static uint64_t get_next_table_origin(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return entry & ZPCI_PTE_ADDR_MASK;
    case ZPCI_ETT_ST:
        return get_st_pto(entry);
    case ZPCI_ETT_RT:
        return get_rt_sto(entry);
    }

    return 0;
}

/**
 * table_translate: do translation within one table and return the following
 *                  table origin
 *
 * @entry: the entry being translated, the result is stored in this.
 * @to: the address of table origin.
 * @ett: expected table type, 1 region table, 0 segment table and -1 page table.
 * @error: error code
 */
static uint64_t table_translate(S390IOTLBEntry *entry, uint64_t to, int8_t ett,
                                uint16_t *error)
{
    uint64_t tx, te, nto = 0;
    uint16_t err = 0;

    tx = get_table_index(entry->iova, ett);
    te = address_space_ldq(&address_space_memory, to + tx * sizeof(uint64_t),
                           MEMTXATTRS_UNSPECIFIED, NULL);

    if (!te) {
        err = ERR_EVENT_INVALTE;
        goto out;
    }

    if (!entry_isvalid(te, ett)) {
        entry->perm &= IOMMU_NONE;
        goto out;
    }

    if (ett == ZPCI_ETT_RT && ((te & ZPCI_TABLE_LEN_RTX) != ZPCI_TABLE_LEN_RTX
                               || te & ZPCI_TABLE_OFFSET_MASK)) {
        err = ERR_EVENT_INVALTL;
        goto out;
    }

    nto = get_next_table_origin(te, ett);
    if (!nto) {
        err = ERR_EVENT_TT;
        goto out;
    }

    if (entry_isprotected(te)) {
        entry->perm &= IOMMU_RO;
    } else {
        entry->perm &= IOMMU_RW;
    }

    if (translate_iscomplete(te, ett)) {
        switch (ett) {
        case ZPCI_ETT_PT:
            entry->translated_addr = te & ZPCI_PTE_ADDR_MASK;
            break;
        case ZPCI_ETT_ST:
            entry->translated_addr = (te & ZPCI_SFAA_MASK) |
                (entry->iova & ~ZPCI_SFAA_MASK);
            break;
        }
        nto = 0;
    }
out:
    if (err) {
        entry->perm = IOMMU_NONE;
        *error = err;
    }
    entry->len = get_frame_size(ett);
    return nto;
}

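/*
 * Walk the guest DMA tables for @addr, starting at the region table
 * (ett == 1) and stepping down one table type per iteration until the
 * translation completes or an error is reported.
 */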
uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
                                  S390IOTLBEntry *entry)
{
    uint64_t to = s390_pci_get_table_origin(g_iota);
    int8_t ett = 1;
    uint16_t error = 0;

    entry->iova = addr & TARGET_PAGE_MASK;
    entry->translated_addr = 0;
    entry->perm = IOMMU_RW;

    if (entry_isprotected(g_iota)) {
        entry->perm &= IOMMU_RO;
    }

    while (to) {
        to = table_translate(entry, to, ett--, &error);
    }

    return error;
}

static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr,
                                          IOMMUAccessFlags flag, int iommu_idx)
{
    S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr);
    S390IOTLBEntry *entry;
    uint64_t iova = addr & TARGET_PAGE_MASK;
    uint16_t error = 0;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    switch (iommu->pbdev->state) {
    case ZPCI_FS_ENABLED:
    case ZPCI_FS_BLOCKED:
        if (!iommu->enabled) {
            return ret;
        }
        break;
    default:
        return ret;
    }

    trace_s390_pci_iommu_xlate(addr);

    if (addr < iommu->pba || addr > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

    entry = g_hash_table_lookup(iommu->iotlb, &iova);
    if (entry) {
        ret.iova = entry->iova;
        ret.translated_addr = entry->translated_addr;
        ret.addr_mask = entry->len - 1;
        ret.perm = entry->perm;
    } else {
        ret.iova = iova;
        ret.addr_mask = ~TARGET_PAGE_MASK;
        ret.perm = IOMMU_NONE;
    }

    if (flag != IOMMU_NONE && !(flag & ret.perm)) {
        error = ERR_EVENT_TPROTE;
    }
err:
    if (error) {
        iommu->pbdev->state = ZPCI_FS_ERROR;
        s390_pci_generate_error_event(error, iommu->pbdev->fh,
                                      iommu->pbdev->fid, addr, 0);
    }
    return ret;
}

static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu,
                                  IOMMUNotifier *notifier)
{
    /* It's impossible to plug a pci device on s390x that already has iommu
     * mappings which need to be replayed, that is due to the "one iommu per
     * zpci device" construct. But when we support migration of vfio-pci
     * devices in future, we need to revisit this.
     */
    return;
}

static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus,
                                        int devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu;

    if (!table) {
        table = g_new0(S390PCIIOMMUTable, 1);
        table->key = key;
        g_hash_table_insert(s->iommu_table, &table->key, table);
    }

    iommu = table->iommu[PCI_SLOT(devfn)];
    if (!iommu) {
        iommu = S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU));

        char *mr_name = g_strdup_printf("iommu-root-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        char *as_name = g_strdup_printf("iommu-pci-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX);
        address_space_init(&iommu->as, &iommu->mr, as_name);
        iommu->iotlb = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                             NULL, g_free);
        table->iommu[PCI_SLOT(devfn)] = iommu;

        g_free(mr_name);
        g_free(as_name);
    }

    return iommu;
}

static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    S390pciState *s = opaque;
    S390PCIIOMMU *iommu = s390_pci_get_iommu(s, bus, devfn);

    return &iommu->as;
}

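/*
 * Atomically OR @to_be_set into the guest indicator byte at @ind_loc using
 * a compare-and-swap loop on the mapped guest memory; returns the previous
 * value of the byte.
 */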
static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}

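/*
 * MSI-X writes from the guest are converted into adapter interruptions:
 * set the per-vector indicator bit and, if the summary indicator byte was
 * previously clear, inject an adapter interrupt on the device's ISC.
 */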
static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
                                unsigned int size)
{
    S390PCIBusDevice *pbdev = opaque;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;
    uint64_t ind_bit;
    uint32_t sum_bit;

    assert(pbdev);

    trace_s390_pci_msi_ctrl_write(data, pbdev->idx, vec);

    if (pbdev->state != ZPCI_FS_ENABLED) {
        return;
    }

    ind_bit = pbdev->routes.adapter.ind_offset;
    sum_bit = pbdev->routes.adapter.summary_offset;

    set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8,
                   0x80 >> ((ind_bit + vec) % 8));
    if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8,
                        0x80 >> (sum_bit % 8))) {
        css_adapter_interrupt(CSS_IO_ADAPTER_PCI, pbdev->isc);
    }
}

static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0xffffffff;
}

static const MemoryRegionOps s390_msi_ctrl_ops = {
    .write = s390_msi_ctrl_write,
    .read = s390_msi_ctrl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
{
    /*
     * The iommu region is initialized against a 0-mapped address space,
     * so the smallest IOMMU region we can define runs from 0 to the end
     * of the PCI address space.
     */
    char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid);
    memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr),
                             TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr),
                             name, iommu->pal + 1);
    iommu->enabled = true;
    memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr));
    g_free(name);
}

void s390_pci_iommu_disable(S390PCIIOMMU *iommu)
{
    iommu->enabled = false;
    g_hash_table_remove_all(iommu->iotlb);
    memory_region_del_subregion(&iommu->mr, MEMORY_REGION(&iommu->iommu_mr));
    object_unparent(OBJECT(&iommu->iommu_mr));
}

static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu = table ? table->iommu[PCI_SLOT(devfn)] : NULL;

    if (!table || !iommu) {
        return;
    }

    table->iommu[PCI_SLOT(devfn)] = NULL;
    g_hash_table_destroy(iommu->iotlb);
    /*
     * An attached PCI device may have memory listeners, eg. VFIO PCI.
     * The associated subregion will already have been unmapped in
     * s390_pci_iommu_disable in response to the guest deconfigure request.
     * Remove the listeners now before destroying the address space.
     */
    address_space_remove_listeners(&iommu->as);
    address_space_destroy(&iommu->as);
    object_unparent(OBJECT(&iommu->mr));
    object_unparent(OBJECT(iommu));
    object_unref(OBJECT(iommu));
}

S390PCIGroup *s390_group_create(int id, int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    group = g_new0(S390PCIGroup, 1);
    group->id = id;
    group->host_id = host_id;
    QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link);
    return group;
}

S390PCIGroup *s390_group_find(int id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id == id) {
            return group;
        }
    }
    return NULL;
}

S390PCIGroup *s390_group_find_host_sim(int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id >= ZPCI_SIM_GRP_START && group->host_id == host_id) {
            return group;
        }
    }
    return NULL;
}

static void s390_pci_init_default_group(void)
{
    S390PCIGroup *group;
    ClpRspQueryPciGrp *resgrp;

    group = s390_group_create(ZPCI_DEFAULT_FN_GRP, ZPCI_DEFAULT_FN_GRP);
    resgrp = &group->zpci_group;
    resgrp->fr = 1;
    resgrp->dasm = 0;
    resgrp->msia = ZPCI_MSI_ADDR;
    resgrp->mui = DEFAULT_MUI;
    resgrp->i = 128;
    resgrp->maxstbl = 128;
    resgrp->version = 0;
    resgrp->dtsm = ZPCI_DTSM;
}

static void set_pbdev_info(S390PCIBusDevice *pbdev)
{
    pbdev->zpci_fn.sdma = ZPCI_SDMA_ADDR;
    pbdev->zpci_fn.edma = ZPCI_EDMA_ADDR;
    pbdev->zpci_fn.pchid = 0;
    pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP;
    pbdev->zpci_fn.fid = pbdev->fid;
    pbdev->zpci_fn.uid = pbdev->uid;
    pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP);
}

static void s390_pcihost_realize(DeviceState *dev, Error **errp)
{
    PCIBus *b;
    BusState *bus;
    PCIHostState *phb = PCI_HOST_BRIDGE(dev);
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    trace_s390_pcihost("realize");

    b = pci_register_root_bus(dev, NULL, s390_pci_set_irq, s390_pci_map_irq,
                              NULL, get_system_memory(), get_system_io(), 0,
                              64, TYPE_PCI_BUS);
    pci_setup_iommu(b, s390_pci_dma_iommu, s);

    bus = BUS(b);
    qbus_set_hotplug_handler(bus, OBJECT(dev));
    phb->bus = b;

    s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL));
    qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev));

    s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                           NULL, g_free);
    s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL);
    s->bus_no = 0;
    s->next_sim_grp = ZPCI_SIM_GRP_START;
    QTAILQ_INIT(&s->pending_sei);
    QTAILQ_INIT(&s->zpci_devs);
    QTAILQ_INIT(&s->zpci_dma_limit);
    QTAILQ_INIT(&s->zpci_groups);

    s390_pci_init_default_group();
    css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false,
                             S390_ADAPTER_SUPPRESSIBLE, errp);
}

static void s390_pcihost_unrealize(DeviceState *dev)
{
    S390PCIGroup *group;
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    while (!QTAILQ_EMPTY(&s->zpci_groups)) {
        group = QTAILQ_FIRST(&s->zpci_groups);
        QTAILQ_REMOVE(&s->zpci_groups, group, link);
    }
}

static int s390_pci_msix_init(S390PCIBusDevice *pbdev)
{
    char *name;
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return -1;
    }

    ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS,
                                       pci_config_size(pbdev->pdev),
                                       sizeof(ctrl));
    table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE,
                                        pci_config_size(pbdev->pdev),
                                        sizeof(table));
    pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA,
                                      pci_config_size(pbdev->pdev),
                                      sizeof(pba));

    pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    name = g_strdup_printf("msix-s390-%04x", pbdev->uid);
    memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev),
                          &s390_msi_ctrl_ops, pbdev, name, TARGET_PAGE_SIZE);
    memory_region_add_subregion(&pbdev->iommu->mr,
                                pbdev->pci_group->zpci_group.msia,
                                &pbdev->msix_notify_mr);
    g_free(name);

    return 0;
}

static void s390_pci_msix_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->msix.entries == 0) {
        return;
    }

    memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr);
    object_unparent(OBJECT(&pbdev->msix_notify_mr));
}

static S390PCIBusDevice *s390_pci_device_new(S390pciState *s,
                                             const char *target, Error **errp)
{
    Error *local_err = NULL;
    DeviceState *dev;

    dev = qdev_try_new(TYPE_S390_PCI_DEVICE);
    if (!dev) {
        error_setg(errp, "zPCI device could not be created");
        return NULL;
    }

    if (!object_property_set_str(OBJECT(dev), "target", target, &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }
    if (!qdev_realize_and_unref(dev, BUS(s->bus), &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }

    return S390_PCI_DEVICE(dev);
}

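/*
 * Find a free device index, starting the search at next_idx and wrapping
 * around modulo FH_MASK_INDEX; fail if every index is already in use.
 */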
static bool s390_pci_alloc_idx(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx;

    idx = s->next_idx;
    while (s390_pci_find_dev_by_idx(s, idx)) {
        idx = (idx + 1) & FH_MASK_INDEX;
        if (idx == s->next_idx) {
            return false;
        }
    }

    pbdev->idx = idx;
    return true;
}

static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);

    if (!s390_has_feat(S390_FEAT_ZPCI)) {
        warn_report("Plugging a PCI/zPCI device without the 'zpci' CPU "
                    "feature enabled; the guest will not be able to see/use "
                    "this device");
    }

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pdev = PCI_DEVICE(dev);

        if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
            error_setg(errp, "multifunction not supported in s390");
            return;
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

        if (!s390_pci_alloc_idx(s, pbdev)) {
            error_setg(errp, "no slot for plugging zpci device");
            return;
        }
    }
}

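/*
 * Propagate a new subordinate bus number from a bridge up through its
 * parent bridges to the root bus, growing each bridge's range as needed.
 */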
static void s390_pci_update_subordinate(PCIDevice *dev, uint32_t nr)
{
    uint32_t old_nr;

    pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
    while (!pci_bus_is_root(pci_get_bus(dev))) {
        dev = pci_get_bus(dev)->parent_dev;

        old_nr = pci_default_read_config(dev, PCI_SUBORDINATE_BUS, 1);
        if (old_nr < nr) {
            pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
        }
    }
}

static int s390_pci_interp_plug(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx, fh;

    if (!s390_pci_get_host_fh(pbdev, &fh)) {
        return -EPERM;
    }

    /*
     * The host device is already in an enabled state, but we always present
     * the initial device state to the guest as disabled (ZPCI_FS_DISABLED).
     * Therefore, mask off the enable bit from the passthrough handle until
     * the guest issues a CLP SET PCI FN later to enable the device.
     */
    pbdev->fh = fh & ~FH_MASK_ENABLE;

    /* Next, see if the idx is already in-use */
    idx = pbdev->fh & FH_MASK_INDEX;
    if (pbdev->idx != idx) {
        if (s390_pci_find_dev_by_idx(s, idx)) {
            return -EINVAL;
        }
        /*
         * Update the idx entry with the passed through idx
         * If the relinquished idx is lower than next_idx, use it
         * to replace next_idx
         */
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (idx < s->next_idx) {
            s->next_idx = idx;
        }
        pbdev->idx = idx;
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    }

    return 0;
}

static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    PCIDevice *pdev = NULL;
    S390PCIBusDevice *pbdev = NULL;
    int rc;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        PCIBridge *pb = PCI_BRIDGE(dev);

        pdev = PCI_DEVICE(dev);
        pci_bridge_map_irq(pb, dev->id, s390_pci_map_irq);
        pci_setup_iommu(&pb->sec_bus, s390_pci_dma_iommu, s);

        qbus_set_hotplug_handler(BUS(&pb->sec_bus), OBJECT(s));

        if (dev->hotplugged) {
            pci_default_write_config(pdev, PCI_PRIMARY_BUS,
                                     pci_dev_bus_num(pdev), 1);
            s->bus_no += 1;
            pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);

            s390_pci_update_subordinate(pdev, s->bus_no);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        pdev = PCI_DEVICE(dev);

        if (!dev->id) {
            /* In the case the PCI device does not define an id */
            /* we generate one based on the PCI address */
            dev->id = g_strdup_printf("auto_%02x:%02x.%01x",
                                      pci_dev_bus_num(pdev),
                                      PCI_SLOT(pdev->devfn),
                                      PCI_FUNC(pdev->devfn));
        }

        pbdev = s390_pci_find_dev_by_target(s, dev->id);
        if (!pbdev) {
            pbdev = s390_pci_device_new(s, dev->id, errp);
            if (!pbdev) {
                return;
            }
        }

        pbdev->pdev = pdev;
        pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn);
        pbdev->iommu->pbdev = pbdev;
        pbdev->state = ZPCI_FS_DISABLED;
        set_pbdev_info(pbdev);

        if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) {
            /*
             * By default, interpretation is always requested; if the available
             * facilities indicate it is not available, fallback to the
             * interception model.
             */
            if (pbdev->interp) {
                if (s390_pci_kvm_interp_allowed()) {
                    rc = s390_pci_interp_plug(s, pbdev);
                    if (rc) {
                        error_setg(errp, "Plug failed for zPCI device in "
                                   "interpretation mode: %d", rc);
                        return;
                    }
                } else {
                    trace_s390_pcihost("zPCI interpretation missing");
                    pbdev->interp = false;
                    pbdev->forwarding_assist = false;
                }
            }
            pbdev->iommu->dma_limit = s390_pci_start_dma_count(s, pbdev);
            /* Fill in CLP information passed via the vfio region */
            s390_pci_get_clp_info(pbdev);
            if (!pbdev->interp) {
                /* Do vfio passthrough but intercept for I/O */
                pbdev->fh |= FH_SHM_VFIO;
                pbdev->forwarding_assist = false;
            }
            /* Register shutdown notifier and reset callback for ISM devices */
            if (pbdev->pft == ZPCI_PFT_ISM) {
                pbdev->shutdown_notifier.notify = s390_pci_shutdown_notifier;
                qemu_register_shutdown_notifier(&pbdev->shutdown_notifier);
                qemu_register_reset(s390_pci_reset_cb, pbdev);
            }
        } else {
            pbdev->fh |= FH_SHM_EMUL;
            /* Always intercept emulated devices */
            pbdev->interp = false;
            pbdev->forwarding_assist = false;
        }

        if (s390_pci_msix_init(pbdev) && !pbdev->interp) {
            error_setg(errp, "MSI-X support is mandatory "
                       "in the S390 architecture");
            return;
        }

        if (dev->hotplugged) {
            s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED,
                                         pbdev->fh, pbdev->fid);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /* the allocated idx is actually getting used */
        s->next_idx = (pbdev->idx + 1) & FH_MASK_INDEX;
        pbdev->fh = pbdev->idx;
        QTAILQ_INSERT_TAIL(&s->zpci_devs, pbdev, link);
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    } else {
        g_assert_not_reached();
    }
}

static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        PCIBus *bus;
        int32_t devfn;

        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        g_assert(pbdev);

        s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
                                     pbdev->fh, pbdev->fid);
        bus = pci_get_bus(pci_dev);
        devfn = pci_dev->devfn;
        qdev_unrealize(dev);

        s390_pci_msix_free(pbdev);
        s390_pci_iommu_free(s, bus, devfn);
        pbdev->pdev = NULL;
        pbdev->state = ZPCI_FS_RESERVED;
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);
        pbdev->fid = 0;
        QTAILQ_REMOVE(&s->zpci_devs, pbdev, link);
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (pbdev->iommu->dma_limit) {
            s390_pci_end_dma_count(s, pbdev->iommu->dma_limit);
        }
        qdev_unrealize(dev);
    }
}

static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev,
                                        Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        error_setg(errp, "PCI bridge hot unplug currently not supported");
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        /*
         * Redirect the unplug request to the zPCI device and remember that
         * we've checked the PCI device already (to prevent endless recursion).
         */
        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        g_assert(pbdev);
        pbdev->pci_unplug_request_processed = true;
        qdev_unplug(DEVICE(pbdev), errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /*
         * If unplug was initially requested for the zPCI device, we
         * first have to redirect to the PCI device, which will in return
         * redirect back to us after performing its checks (if the request
         * is not blocked, e.g. because it's a PCI bridge).
         */
        if (pbdev->pdev && !pbdev->pci_unplug_request_processed) {
            qdev_unplug(DEVICE(pbdev->pdev), errp);
            return;
        }
        pbdev->pci_unplug_request_processed = false;

        switch (pbdev->state) {
        case ZPCI_FS_STANDBY:
        case ZPCI_FS_RESERVED:
            s390_pci_perform_unplug(pbdev);
            break;
        default:
            /*
             * Allow to send multiple requests, e.g. if the guest crashed
             * before releasing the device, we would not be able to send
             * another request to the same VM (e.g. fresh OS).
             */
            pbdev->unplug_requested = true;
            s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST,
                                         pbdev->fh, pbdev->fid);
        }
    } else {
        g_assert_not_reached();
    }
}

static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
                                      void *opaque)
{
    S390pciState *s = opaque;
    PCIBus *sec_bus = NULL;

    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
         PCI_HEADER_TYPE_BRIDGE)) {
        return;
    }

    (s->bus_no)++;
    pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1);
    pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);

    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
    if (!sec_bus) {
        return;
    }

    /* Assign numbers to all child bridges. The last is the highest number. */
    pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
}

static void s390_pcihost_reset(DeviceState *dev)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
    PCIBus *bus = s->parent_obj.bus;
    S390PCIBusDevice *pbdev, *next;

    /* Process all pending unplug requests */
    QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
        if (pbdev->unplug_requested) {
            if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
                /* Interpreted devices were using interrupt forwarding */
                s390_pci_kvm_aif_disable(pbdev);
            } else if (pbdev->summary_ind) {
                pci_dereg_irqs(pbdev);
            }
            if (pbdev->iommu->enabled) {
                pci_dereg_ioat(pbdev->iommu);
            }
            pbdev->state = ZPCI_FS_STANDBY;
            s390_pci_perform_unplug(pbdev);
        }
    }

    /*
     * When resetting a PCI bridge, the assigned numbers are set to 0. So
     * on every system reset, we also have to reassign numbers.
     */
    s->bus_no = 0;
    pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s);
}

static void s390_pcihost_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    dc->reset = s390_pcihost_reset;
    dc->realize = s390_pcihost_realize;
    dc->unrealize = s390_pcihost_unrealize;
    hc->pre_plug = s390_pcihost_pre_plug;
    hc->plug = s390_pcihost_plug;
    hc->unplug_request = s390_pcihost_unplug_request;
    hc->unplug = s390_pcihost_unplug;
    msi_nonbroken = true;
}

static const TypeInfo s390_pcihost_info = {
    .name = TYPE_S390_PCI_HOST_BRIDGE,
    .parent = TYPE_PCI_HOST_BRIDGE,
    .instance_size = sizeof(S390pciState),
    .class_init = s390_pcihost_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static const TypeInfo s390_pcibus_info = {
    .name = TYPE_S390_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(S390PCIBus),
};

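/* Return the lowest unused uid, or UID_UNDEFINED if all uids are taken. */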
static uint16_t s390_pci_generate_uid(S390pciState *s)
{
    uint16_t uid = 0;

    do {
        uid++;
        if (!s390_pci_find_dev_by_uid(s, uid)) {
            return uid;
        }
    } while (uid < ZPCI_MAX_UID);

    return UID_UNDEFINED;
}

static uint32_t s390_pci_generate_fid(S390pciState *s, Error **errp)
{
    uint32_t fid = 0;

    do {
        if (!s390_pci_find_dev_by_fid(s, fid)) {
            return fid;
        }
    } while (fid++ != ZPCI_MAX_FID);

    error_setg(errp, "no free fid could be found");
    return 0;
}

static void s390_pci_device_realize(DeviceState *dev, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev);
    S390pciState *s = s390_get_phb();

    if (!zpci->target) {
        error_setg(errp, "target must be defined");
        return;
    }

    if (s390_pci_find_dev_by_target(s, zpci->target)) {
        error_setg(errp, "target %s already has an associated zpci device",
                   zpci->target);
        return;
    }

    if (zpci->uid == UID_UNDEFINED) {
        zpci->uid = s390_pci_generate_uid(s);
        if (!zpci->uid) {
            error_setg(errp, "no free uid could be found");
            return;
        }
    } else if (s390_pci_find_dev_by_uid(s, zpci->uid)) {
        error_setg(errp, "uid %u already in use", zpci->uid);
        return;
    }

    if (!zpci->fid_defined) {
        Error *local_error = NULL;

        zpci->fid = s390_pci_generate_fid(s, &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            return;
        }
    } else if (s390_pci_find_dev_by_fid(s, zpci->fid)) {
        error_setg(errp, "fid %u already in use", zpci->fid);
        return;
    }

    zpci->state = ZPCI_FS_RESERVED;
    zpci->fmb.format = ZPCI_FMB_FORMAT;
}

static void s390_pci_device_reset(DeviceState *dev)
{
    S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        return;
    case ZPCI_FS_STANDBY:
        break;
    default:
        pbdev->fh &= ~FH_MASK_ENABLE;
        pbdev->state = ZPCI_FS_DISABLED;
        break;
    }

    if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
        /* Interpreted devices were using interrupt forwarding */
        s390_pci_kvm_aif_disable(pbdev);
    } else if (pbdev->summary_ind) {
        pci_dereg_irqs(pbdev);
    }
    if (pbdev->iommu->enabled) {
        pci_dereg_ioat(pbdev->iommu);
    }

    fmb_timer_free(pbdev);
}

static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    visit_type_uint32(v, name, ptr, errp);
}

static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj);
    Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    if (!visit_type_uint32(v, name, ptr, errp)) {
        return;
    }
    zpci->fid_defined = true;
}

static const PropertyInfo s390_pci_fid_propinfo = {
    .name = "zpci_fid",
    .get = s390_pci_get_fid,
    .set = s390_pci_set_fid,
};

#define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \
    DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t)

static Property s390_pci_device_properties[] = {
    DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED),
    DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid),
    DEFINE_PROP_STRING("target", S390PCIBusDevice, target),
    DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true),
    DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice, forwarding_assist,
                     true),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription s390_pci_device_vmstate = {
    .name = TYPE_S390_PCI_DEVICE,
    /*
     * TODO: add state handling here, so migration works at least with
     * emulated pci devices on s390x
     */
    .unmigratable = 1,
};

static void s390_pci_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "zpci device";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->reset = s390_pci_device_reset;
    dc->bus_type = TYPE_S390_PCI_BUS;
    dc->realize = s390_pci_device_realize;
    device_class_set_props(dc, s390_pci_device_properties);
    dc->vmsd = &s390_pci_device_vmstate;
}

static const TypeInfo s390_pci_device_info = {
    .name = TYPE_S390_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(S390PCIBusDevice),
    .class_init = s390_pci_device_class_init,
};

static const TypeInfo s390_pci_iommu_info = {
    .name = TYPE_S390_PCI_IOMMU,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(S390PCIIOMMU),
};

static void s390_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = s390_translate_iommu;
    imrc->replay = s390_pci_iommu_replay;
}

static const TypeInfo s390_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_S390_IOMMU_MEMORY_REGION,
    .class_init = s390_iommu_memory_region_class_init,
};

static void s390_pci_register_types(void)
{
    type_register_static(&s390_pcihost_info);
    type_register_static(&s390_pcibus_info);
    type_register_static(&s390_pci_device_info);
    type_register_static(&s390_pci_iommu_info);
    type_register_static(&s390_iommu_memory_region_info);
}

type_init(s390_pci_register_types)