/*
 * s390 PCI BUS
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-vfio.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/msi.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"

#include "trace.h"

S390pciState *s390_get_phb(void)
{
    static S390pciState *phb;

    if (!phb) {
        phb = S390_PCI_HOST_BRIDGE(
            object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
        assert(phb != NULL);
    }

    return phb;
}

int pci_chsc_sei_nt2_get_event(void *res)
{
    ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res;
    PciCcdfAvail *accdf;
    PciCcdfErr *eccdf;
    int rc = 1;
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = QTAILQ_FIRST(&s->pending_sei);
    if (sei_cont) {
        QTAILQ_REMOVE(&s->pending_sei, sei_cont, link);
        nt2_res->nt = 2;
        nt2_res->cc = sei_cont->cc;
        nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res));
        switch (sei_cont->cc) {
        case 1: /* error event */
            eccdf = (PciCcdfErr *)nt2_res->ccdf;
            eccdf->fid = cpu_to_be32(sei_cont->fid);
            eccdf->fh = cpu_to_be32(sei_cont->fh);
            eccdf->e = cpu_to_be32(sei_cont->e);
            eccdf->faddr = cpu_to_be64(sei_cont->faddr);
            eccdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        case 2: /* availability event */
            accdf = (PciCcdfAvail *)nt2_res->ccdf;
            accdf->fid = cpu_to_be32(sei_cont->fid);
            accdf->fh = cpu_to_be32(sei_cont->fh);
            accdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        default:
            abort();
        }
        g_free(sei_cont);
        rc = 0;
    }

    return rc;
}

int pci_chsc_sei_nt2_have_event(void)
{
    S390pciState *s = s390_get_phb();

    return !QTAILQ_EMPTY(&s->pending_sei);
}

S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
                                               S390PCIBusDevice *pbdev)
{
    S390PCIBusDevice *ret = pbdev ? QTAILQ_NEXT(pbdev, link) :
        QTAILQ_FIRST(&s->zpci_devs);

    while (ret && ret->state == ZPCI_FS_RESERVED) {
        ret = QTAILQ_NEXT(ret, link);
    }

    return ret;
}

S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->fid == fid) {
            return pbdev;
        }
    }

    return NULL;
}

void s390_pci_sclp_configure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("configure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        pbdev->state = ZPCI_FS_DISABLED;
        rc = SCLP_RC_NORMAL_COMPLETION;
        break;
    default:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static void s390_pci_shutdown_notifier(Notifier *n, void *opaque)
{
    S390PCIBusDevice *pbdev = container_of(n, S390PCIBusDevice,
                                           shutdown_notifier);

    pci_device_reset(pbdev->pdev);
}

static void s390_pci_perform_unplug(S390PCIBusDevice *pbdev)
{
    HotplugHandler *hotplug_ctrl;

    if (pbdev->pft == ZPCI_PFT_ISM) {
        notifier_remove(&pbdev->shutdown_notifier);
    }

    /* Unplug the PCI device */
    if (pbdev->pdev) {
        DeviceState *pdev = DEVICE(pbdev->pdev);

        hotplug_ctrl = qdev_get_hotplug_handler(pdev);
        hotplug_handler_unplug(hotplug_ctrl, pdev, &error_abort);
        object_unparent(OBJECT(pdev));
    }

    /* Unplug the zPCI device */
    hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(pbdev));
    hotplug_handler_unplug(hotplug_ctrl, DEVICE(pbdev), &error_abort);
    object_unparent(OBJECT(pbdev));
}

void s390_pci_sclp_deconfigure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("deconfigure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
        break;
    default:
        if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
            /* Interpreted devices were using interrupt forwarding */
            s390_pci_kvm_aif_disable(pbdev);
        } else if (pbdev->summary_ind) {
            pci_dereg_irqs(pbdev);
        }
        if (pbdev->iommu->enabled) {
            pci_dereg_ioat(pbdev->iommu);
        }
        pbdev->state = ZPCI_FS_STANDBY;
        rc = SCLP_RC_NORMAL_COMPLETION;

        if (pbdev->unplug_requested) {
            s390_pci_perform_unplug(pbdev);
        }
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static S390PCIBusDevice *s390_pci_find_dev_by_uid(S390pciState *s, uint16_t uid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->uid == uid) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s,
                                              const char *target)
{
    S390PCIBusDevice *pbdev;

    if (!target) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (!strcmp(pbdev->target, target)) {
            return pbdev;
        }
    }

    return NULL;
}

static S390PCIBusDevice *s390_pci_find_dev_by_pci(S390pciState *s,
                                                  PCIDevice *pci_dev)
{
    S390PCIBusDevice *pbdev;

    if (!pci_dev) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->pdev == pci_dev) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx)
{
    return g_hash_table_lookup(s->zpci_table, &idx);
}

S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh)
{
    uint32_t idx = FH_MASK_INDEX & fh;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_idx(s, idx);

    if (pbdev && pbdev->fh == fh) {
        return pbdev;
    }

    return NULL;
}

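/*
 * Queue an SEI container describing a PCI event for the guest and generate
 * a channel report word so that the event can be retrieved via CHSC SEI.
 */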
static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh,
                                    uint32_t fid, uint64_t faddr, uint32_t e)
{
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = g_new0(SeiContainer, 1);
    sei_cont->fh = fh;
    sei_cont->fid = fid;
    sei_cont->cc = cc;
    sei_cont->pec = pec;
    sei_cont->faddr = faddr;
    sei_cont->e = e;

    QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link);
    css_generate_css_crws(0);
}

static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh,
                                         uint32_t fid)
{
    s390_pci_generate_event(2, pec, fh, fid, 0, 0);
}

void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
                                   uint64_t faddr, uint32_t e)
{
    s390_pci_generate_event(1, pec, fh, fid, faddr, e);
}

static void s390_pci_set_irq(void *opaque, int irq, int level)
{
    /* nothing to do */
}

static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* nothing to do */
    return 0;
}

static uint64_t s390_pci_get_table_origin(uint64_t iota)
{
    return iota & ~ZPCI_IOTA_RTTO_FLAG;
}

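/* Extract the region-, segment- and page-table indexes from an IOVA */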
static unsigned int calc_rtx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_sx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_px(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> TARGET_PAGE_BITS) & ZPCI_PT_MASK;
}

static uint64_t get_rt_sto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
           ? (entry & ZPCI_RTE_ADDR_MASK)
           : 0;
}

static uint64_t get_st_pto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
           ? (entry & ZPCI_STE_ADDR_MASK)
           : 0;
}

static bool rt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static bool pt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static bool entry_isprotected(uint64_t entry)
{
    return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}

/* ett is expected table type, -1 page table, 0 segment table, 1 region table */
static uint64_t get_table_index(uint64_t iova, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return calc_px(iova);
    case ZPCI_ETT_ST:
        return calc_sx(iova);
    case ZPCI_ETT_RT:
        return calc_rtx(iova);
    }

    return -1;
}

static bool entry_isvalid(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return pt_entry_isvalid(entry);
    case ZPCI_ETT_ST:
    case ZPCI_ETT_RT:
        return rt_entry_isvalid(entry);
    }

    return false;
}

/* Return true if address translation is done */
static bool translate_iscomplete(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case 0:
        return (entry & ZPCI_TABLE_FC) ? true : false;
    case 1:
        return false;
    }

    return true;
}

static uint64_t get_frame_size(int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return 1ULL << 12;
    case ZPCI_ETT_ST:
        return 1ULL << 20;
    case ZPCI_ETT_RT:
        return 1ULL << 31;
    }

    return 0;
}

static uint64_t get_next_table_origin(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return entry & ZPCI_PTE_ADDR_MASK;
    case ZPCI_ETT_ST:
        return get_st_pto(entry);
    case ZPCI_ETT_RT:
        return get_rt_sto(entry);
    }

    return 0;
}

/**
 * table_translate: do translation within one table and return the following
 *                  table origin
 *
 * @entry: the entry being translated, the result is stored in this.
 * @to: the address of table origin.
 * @ett: expected table type, 1 region table, 0 segment table and -1 page table.
 * @error: error code
 */
static uint64_t table_translate(S390IOTLBEntry *entry, uint64_t to, int8_t ett,
                                uint16_t *error)
{
    uint64_t tx, te, nto = 0;
    uint16_t err = 0;

    tx = get_table_index(entry->iova, ett);
    te = address_space_ldq(&address_space_memory, to + tx * sizeof(uint64_t),
                           MEMTXATTRS_UNSPECIFIED, NULL);

    if (!te) {
        err = ERR_EVENT_INVALTE;
        goto out;
    }

    if (!entry_isvalid(te, ett)) {
        entry->perm &= IOMMU_NONE;
        goto out;
    }

    if (ett == ZPCI_ETT_RT && ((te & ZPCI_TABLE_LEN_RTX) != ZPCI_TABLE_LEN_RTX
                               || te & ZPCI_TABLE_OFFSET_MASK)) {
        err = ERR_EVENT_INVALTL;
        goto out;
    }

    nto = get_next_table_origin(te, ett);
    if (!nto) {
        err = ERR_EVENT_TT;
        goto out;
    }

    if (entry_isprotected(te)) {
        entry->perm &= IOMMU_RO;
    } else {
        entry->perm &= IOMMU_RW;
    }

    if (translate_iscomplete(te, ett)) {
        switch (ett) {
        case ZPCI_ETT_PT:
            entry->translated_addr = te & ZPCI_PTE_ADDR_MASK;
            break;
        case ZPCI_ETT_ST:
            entry->translated_addr = (te & ZPCI_SFAA_MASK) |
                (entry->iova & ~ZPCI_SFAA_MASK);
            break;
        }
        nto = 0;
    }
out:
    if (err) {
        entry->perm = IOMMU_NONE;
        *error = err;
    }
    entry->len = get_frame_size(ett);
    return nto;
}

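/*
 * Walk the guest DMA translation tables (region table down to page table)
 * for @addr and fill in @entry; returns 0 on success or an ERR_EVENT_* code.
 */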
uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
                                  S390IOTLBEntry *entry)
{
    uint64_t to = s390_pci_get_table_origin(g_iota);
    int8_t ett = 1;
    uint16_t error = 0;

    entry->iova = addr & TARGET_PAGE_MASK;
    entry->translated_addr = 0;
    entry->perm = IOMMU_RW;

    if (entry_isprotected(g_iota)) {
        entry->perm &= IOMMU_RO;
    }

    while (to) {
        to = table_translate(entry, to, ett--, &error);
    }

    return error;
}

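/*
 * IOMMU translate callback: reject accesses outside the PBA/PAL window,
 * look up the translation in the per-device IOTLB and raise an error event
 * on failure.
 */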
static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr,
                                          IOMMUAccessFlags flag, int iommu_idx)
{
    S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr);
    S390IOTLBEntry *entry;
    uint64_t iova = addr & TARGET_PAGE_MASK;
    uint16_t error = 0;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    switch (iommu->pbdev->state) {
    case ZPCI_FS_ENABLED:
    case ZPCI_FS_BLOCKED:
        if (!iommu->enabled) {
            return ret;
        }
        break;
    default:
        return ret;
    }

    trace_s390_pci_iommu_xlate(addr);

    if (addr < iommu->pba || addr > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

    entry = g_hash_table_lookup(iommu->iotlb, &iova);
    if (entry) {
        ret.iova = entry->iova;
        ret.translated_addr = entry->translated_addr;
        ret.addr_mask = entry->len - 1;
        ret.perm = entry->perm;
    } else {
        ret.iova = iova;
        ret.addr_mask = ~TARGET_PAGE_MASK;
        ret.perm = IOMMU_NONE;
    }

    if (flag != IOMMU_NONE && !(flag & ret.perm)) {
        error = ERR_EVENT_TPROTE;
    }
err:
    if (error) {
        iommu->pbdev->state = ZPCI_FS_ERROR;
        s390_pci_generate_error_event(error, iommu->pbdev->fh,
                                      iommu->pbdev->fid, addr, 0);
    }
    return ret;
}

static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu,
                                  IOMMUNotifier *notifier)
{
    /* It's impossible to plug a pci device on s390x that already has iommu
     * mappings which need to be replayed, that is due to the "one iommu per
     * zpci device" construct. But when we support migration of vfio-pci
     * devices in future, we need to revisit this.
     */
    return;
}

static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus,
                                        int devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu;

    if (!table) {
        table = g_new0(S390PCIIOMMUTable, 1);
        table->key = key;
        g_hash_table_insert(s->iommu_table, &table->key, table);
    }

    iommu = table->iommu[PCI_SLOT(devfn)];
    if (!iommu) {
        iommu = S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU));

        char *mr_name = g_strdup_printf("iommu-root-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        char *as_name = g_strdup_printf("iommu-pci-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX);
        address_space_init(&iommu->as, &iommu->mr, as_name);
        iommu->iotlb = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                             NULL, g_free);
        table->iommu[PCI_SLOT(devfn)] = iommu;

        g_free(mr_name);
        g_free(as_name);
    }

    return iommu;
}

static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    S390pciState *s = opaque;
    S390PCIIOMMU *iommu = s390_pci_get_iommu(s, bus, devfn);

    return &iommu->as;
}

static const PCIIOMMUOps s390_iommu_ops = {
    .get_address_space = s390_pci_dma_iommu,
};

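/*
 * Atomically OR @to_be_set into the guest indicator byte at @ind_loc and
 * return the previous value of that byte (or -1 if the indicator location
 * cannot be mapped).
 */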
static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}

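/*
 * MMIO write handler for the MSI-X notification area: set the adapter
 * indicator bit for the signalled vector and inject an adapter interrupt
 * if the summary indicator byte was not already set.
 */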
static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
                                unsigned int size)
{
    S390PCIBusDevice *pbdev = opaque;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;
    uint64_t ind_bit;
    uint32_t sum_bit;

    assert(pbdev);

    trace_s390_pci_msi_ctrl_write(data, pbdev->idx, vec);

    if (pbdev->state != ZPCI_FS_ENABLED) {
        return;
    }

    ind_bit = pbdev->routes.adapter.ind_offset;
    sum_bit = pbdev->routes.adapter.summary_offset;

    set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8,
                   0x80 >> ((ind_bit + vec) % 8));
    if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8,
                        0x80 >> (sum_bit % 8))) {
        css_adapter_interrupt(CSS_IO_ADAPTER_PCI, pbdev->isc);
    }
}

static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0xffffffff;
}

static const MemoryRegionOps s390_msi_ctrl_ops = {
    .write = s390_msi_ctrl_write,
    .read = s390_msi_ctrl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
{
    /*
     * The iommu region is initialized against a 0-mapped address space,
     * so the smallest IOMMU region we can define runs from 0 to the end
     * of the PCI address space.
     */
    char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid);
    memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr),
                             TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr),
                             name, iommu->pal + 1);
    iommu->enabled = true;
    memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr));
    g_free(name);
}

void s390_pci_iommu_disable(S390PCIIOMMU *iommu)
{
    iommu->enabled = false;
    g_hash_table_remove_all(iommu->iotlb);
    memory_region_del_subregion(&iommu->mr, MEMORY_REGION(&iommu->iommu_mr));
    object_unparent(OBJECT(&iommu->iommu_mr));
}

static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu = table ? table->iommu[PCI_SLOT(devfn)] : NULL;

    if (!table || !iommu) {
        return;
    }

    table->iommu[PCI_SLOT(devfn)] = NULL;
    g_hash_table_destroy(iommu->iotlb);
    /*
     * An attached PCI device may have memory listeners, eg. VFIO PCI.
     * The associated subregion will already have been unmapped in
     * s390_pci_iommu_disable in response to the guest deconfigure request.
     * Remove the listeners now before destroying the address space.
     */
    address_space_remove_listeners(&iommu->as);
    address_space_destroy(&iommu->as);
    object_unparent(OBJECT(&iommu->mr));
    object_unparent(OBJECT(iommu));
    object_unref(OBJECT(iommu));
}

S390PCIGroup *s390_group_create(int id, int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    group = g_new0(S390PCIGroup, 1);
    group->id = id;
    group->host_id = host_id;
    QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link);
    return group;
}

S390PCIGroup *s390_group_find(int id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id == id) {
            return group;
        }
    }
    return NULL;
}

S390PCIGroup *s390_group_find_host_sim(int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id >= ZPCI_SIM_GRP_START && group->host_id == host_id) {
            return group;
        }
    }
    return NULL;
}

static void s390_pci_init_default_group(void)
{
    S390PCIGroup *group;
    ClpRspQueryPciGrp *resgrp;

    group = s390_group_create(ZPCI_DEFAULT_FN_GRP, ZPCI_DEFAULT_FN_GRP);
    resgrp = &group->zpci_group;
    resgrp->fr = 1;
    resgrp->dasm = 0;
    resgrp->msia = ZPCI_MSI_ADDR;
    resgrp->mui = DEFAULT_MUI;
    resgrp->i = 128;
    resgrp->maxstbl = 128;
    resgrp->version = 0;
    resgrp->dtsm = ZPCI_DTSM;
}

static void set_pbdev_info(S390PCIBusDevice *pbdev)
{
    pbdev->zpci_fn.sdma = ZPCI_SDMA_ADDR;
    pbdev->zpci_fn.edma = ZPCI_EDMA_ADDR;
    pbdev->zpci_fn.pchid = 0;
    pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP;
    pbdev->zpci_fn.fid = pbdev->fid;
    pbdev->zpci_fn.uid = pbdev->uid;
    pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP);
}

static void s390_pcihost_realize(DeviceState *dev, Error **errp)
{
    PCIBus *b;
    BusState *bus;
    PCIHostState *phb = PCI_HOST_BRIDGE(dev);
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    trace_s390_pcihost("realize");

    b = pci_register_root_bus(dev, NULL, s390_pci_set_irq, s390_pci_map_irq,
                              NULL, get_system_memory(), get_system_io(), 0,
                              64, TYPE_PCI_BUS);
    pci_setup_iommu(b, &s390_iommu_ops, s);

    bus = BUS(b);
    qbus_set_hotplug_handler(bus, OBJECT(dev));
    phb->bus = b;

    s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL));
    qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev));

    s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                           NULL, g_free);
    s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL);
    s->bus_no = 0;
    s->next_sim_grp = ZPCI_SIM_GRP_START;
    QTAILQ_INIT(&s->pending_sei);
    QTAILQ_INIT(&s->zpci_devs);
    QTAILQ_INIT(&s->zpci_dma_limit);
    QTAILQ_INIT(&s->zpci_groups);

    s390_pci_init_default_group();
    css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false,
                             S390_ADAPTER_SUPPRESSIBLE, errp);
}

static void s390_pcihost_unrealize(DeviceState *dev)
{
    S390PCIGroup *group;
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    while (!QTAILQ_EMPTY(&s->zpci_groups)) {
        group = QTAILQ_FIRST(&s->zpci_groups);
        QTAILQ_REMOVE(&s->zpci_groups, group, link);
    }
}

static int s390_pci_msix_init(S390PCIBusDevice *pbdev)
{
    char *name;
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return -1;
    }

    ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS,
             pci_config_size(pbdev->pdev), sizeof(ctrl));
    table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE,
             pci_config_size(pbdev->pdev), sizeof(table));
    pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA,
            pci_config_size(pbdev->pdev), sizeof(pba));

    pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    name = g_strdup_printf("msix-s390-%04x", pbdev->uid);
    memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev),
                          &s390_msi_ctrl_ops, pbdev, name, TARGET_PAGE_SIZE);
    memory_region_add_subregion(&pbdev->iommu->mr,
                                pbdev->pci_group->zpci_group.msia,
                                &pbdev->msix_notify_mr);
    g_free(name);

    return 0;
}

static void s390_pci_msix_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->msix.entries == 0) {
        return;
    }

    memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr);
    object_unparent(OBJECT(&pbdev->msix_notify_mr));
}

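/* Create and realize a zPCI device for @target on the zPCI bus */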
static S390PCIBusDevice *s390_pci_device_new(S390pciState *s,
                                             const char *target, Error **errp)
{
    Error *local_err = NULL;
    DeviceState *dev;

    dev = qdev_try_new(TYPE_S390_PCI_DEVICE);
    if (!dev) {
        error_setg(errp, "zPCI device could not be created");
        return NULL;
    }

    if (!object_property_set_str(OBJECT(dev), "target", target, &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }
    if (!qdev_realize_and_unref(dev, BUS(s->bus), &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }

    return S390_PCI_DEVICE(dev);
}

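/*
 * Find a free function handle index for @pbdev, starting the search at
 * next_idx; returns false if the entire index space is in use.
 */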
static bool s390_pci_alloc_idx(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx;

    idx = s->next_idx;
    while (s390_pci_find_dev_by_idx(s, idx)) {
        idx = (idx + 1) & FH_MASK_INDEX;
        if (idx == s->next_idx) {
            return false;
        }
    }

    pbdev->idx = idx;
    return true;
}

static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);

    if (!s390_has_feat(S390_FEAT_ZPCI)) {
        warn_report("Plugging a PCI/zPCI device without the 'zpci' CPU "
                    "feature enabled; the guest will not be able to see/use "
                    "this device");
    }

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pdev = PCI_DEVICE(dev);

        if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
            error_setg(errp, "multifunction not supported in s390");
            return;
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

        if (!s390_pci_alloc_idx(s, pbdev)) {
            error_setg(errp, "no slot for plugging zpci device");
            return;
        }
    }
}

static void s390_pci_update_subordinate(PCIDevice *dev, uint32_t nr)
{
    uint32_t old_nr;

    pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
    while (!pci_bus_is_root(pci_get_bus(dev))) {
        dev = pci_get_bus(dev)->parent_dev;

        old_nr = pci_default_read_config(dev, PCI_SUBORDINATE_BUS, 1);
        if (old_nr < nr) {
            pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
        }
    }
}

static int s390_pci_interp_plug(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx, fh;

    if (!s390_pci_get_host_fh(pbdev, &fh)) {
        return -EPERM;
    }

    /*
     * The host device is already in an enabled state, but we always present
     * the initial device state to the guest as disabled (ZPCI_FS_DISABLED).
     * Therefore, mask off the enable bit from the passthrough handle until
     * the guest issues a CLP SET PCI FN later to enable the device.
     */
    pbdev->fh = fh & ~FH_MASK_ENABLE;

    /* Next, see if the idx is already in-use */
    idx = pbdev->fh & FH_MASK_INDEX;
    if (pbdev->idx != idx) {
        if (s390_pci_find_dev_by_idx(s, idx)) {
            return -EINVAL;
        }
        /*
         * Update the idx entry with the passed through idx
         * If the relinquished idx is lower than next_idx, use it
         * to replace next_idx
         */
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (idx < s->next_idx) {
            s->next_idx = idx;
        }
        pbdev->idx = idx;
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    }

    return 0;
}

static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    PCIDevice *pdev = NULL;
    S390PCIBusDevice *pbdev = NULL;
    int rc;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        PCIBridge *pb = PCI_BRIDGE(dev);

        pdev = PCI_DEVICE(dev);
        pci_bridge_map_irq(pb, dev->id, s390_pci_map_irq);
        pci_setup_iommu(&pb->sec_bus, &s390_iommu_ops, s);

        qbus_set_hotplug_handler(BUS(&pb->sec_bus), OBJECT(s));

        if (dev->hotplugged) {
            pci_default_write_config(pdev, PCI_PRIMARY_BUS,
                                     pci_dev_bus_num(pdev), 1);
            s->bus_no += 1;
            pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);

            s390_pci_update_subordinate(pdev, s->bus_no);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        pdev = PCI_DEVICE(dev);

        if (!dev->id) {
            /* In the case the PCI device does not define an id */
            /* we generate one based on the PCI address */
            dev->id = g_strdup_printf("auto_%02x:%02x.%01x",
                                      pci_dev_bus_num(pdev),
                                      PCI_SLOT(pdev->devfn),
                                      PCI_FUNC(pdev->devfn));
        }

        pbdev = s390_pci_find_dev_by_target(s, dev->id);
        if (!pbdev) {
            pbdev = s390_pci_device_new(s, dev->id, errp);
            if (!pbdev) {
                return;
            }
        }

        pbdev->pdev = pdev;
        pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn);
        pbdev->iommu->pbdev = pbdev;
        pbdev->state = ZPCI_FS_DISABLED;
        set_pbdev_info(pbdev);

        if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) {
            /*
             * By default, interpretation is always requested; if the available
             * facilities indicate it is not available, fallback to the
             * interception model.
             */
            if (pbdev->interp) {
                if (s390_pci_kvm_interp_allowed()) {
                    rc = s390_pci_interp_plug(s, pbdev);
                    if (rc) {
                        error_setg(errp, "Plug failed for zPCI device in "
                                   "interpretation mode: %d", rc);
                        return;
                    }
                } else {
                    trace_s390_pcihost("zPCI interpretation missing");
                    pbdev->interp = false;
                    pbdev->forwarding_assist = false;
                }
            }
            pbdev->iommu->dma_limit = s390_pci_start_dma_count(s, pbdev);
            /* Fill in CLP information passed via the vfio region */
            s390_pci_get_clp_info(pbdev);
            if (!pbdev->interp) {
                /* Do vfio passthrough but intercept for I/O */
                pbdev->fh |= FH_SHM_VFIO;
                pbdev->forwarding_assist = false;
            }
            /* Register shutdown notifier and reset callback for ISM devices */
            if (pbdev->pft == ZPCI_PFT_ISM) {
                pbdev->shutdown_notifier.notify = s390_pci_shutdown_notifier;
                qemu_register_shutdown_notifier(&pbdev->shutdown_notifier);
            }
        } else {
            pbdev->fh |= FH_SHM_EMUL;
            /* Always intercept emulated devices */
            pbdev->interp = false;
            pbdev->forwarding_assist = false;
        }

        if (s390_pci_msix_init(pbdev) && !pbdev->interp) {
            error_setg(errp, "MSI-X support is mandatory "
                       "in the S390 architecture");
            return;
        }

        if (dev->hotplugged) {
            s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED,
                                         pbdev->fh, pbdev->fid);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /* the allocated idx is actually getting used */
        s->next_idx = (pbdev->idx + 1) & FH_MASK_INDEX;
        pbdev->fh = pbdev->idx;
        QTAILQ_INSERT_TAIL(&s->zpci_devs, pbdev, link);
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    } else {
        g_assert_not_reached();
    }
}

static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        PCIBus *bus;
        int32_t devfn;

        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        g_assert(pbdev);

        s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
                                     pbdev->fh, pbdev->fid);
        bus = pci_get_bus(pci_dev);
        devfn = pci_dev->devfn;
        qdev_unrealize(dev);

        s390_pci_msix_free(pbdev);
        s390_pci_iommu_free(s, bus, devfn);
        pbdev->pdev = NULL;
        pbdev->state = ZPCI_FS_RESERVED;
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);
        pbdev->fid = 0;
        QTAILQ_REMOVE(&s->zpci_devs, pbdev, link);
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (pbdev->iommu->dma_limit) {
            s390_pci_end_dma_count(s, pbdev->iommu->dma_limit);
        }
        qdev_unrealize(dev);
    }
}

static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev,
                                        Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        error_setg(errp, "PCI bridge hot unplug currently not supported");
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        /*
         * Redirect the unplug request to the zPCI device and remember that
         * we've checked the PCI device already (to prevent endless recursion).
         */
        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        g_assert(pbdev);
        pbdev->pci_unplug_request_processed = true;
        qdev_unplug(DEVICE(pbdev), errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /*
         * If unplug was initially requested for the zPCI device, we
         * first have to redirect to the PCI device, which will in return
         * redirect back to us after performing its checks (if the request
         * is not blocked, e.g. because it's a PCI bridge).
         */
        if (pbdev->pdev && !pbdev->pci_unplug_request_processed) {
            qdev_unplug(DEVICE(pbdev->pdev), errp);
            return;
        }
        pbdev->pci_unplug_request_processed = false;

        switch (pbdev->state) {
        case ZPCI_FS_STANDBY:
        case ZPCI_FS_RESERVED:
            s390_pci_perform_unplug(pbdev);
            break;
        default:
            /*
             * Allow to send multiple requests, e.g. if the guest crashed
             * before releasing the device, we would not be able to send
             * another request to the same VM (e.g. fresh OS).
             */
            pbdev->unplug_requested = true;
            s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST,
                                         pbdev->fh, pbdev->fid);
        }
    } else {
        g_assert_not_reached();
    }
}

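/*
 * Assign bus numbers to @pdev if it is a bridge, then recurse into its
 * secondary bus so that all child bridges get numbered as well.
 */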
static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
                                      void *opaque)
{
    S390pciState *s = opaque;
    PCIBus *sec_bus = NULL;

    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
         PCI_HEADER_TYPE_BRIDGE)) {
        return;
    }

    (s->bus_no)++;
    pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1);
    pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);

    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
    if (!sec_bus) {
        return;
    }

    /* Assign numbers to all child bridges. The last is the highest number. */
    pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
}

void s390_pci_ism_reset(void)
{
    S390pciState *s = s390_get_phb();

    S390PCIBusDevice *pbdev, *next;

    /* Trigger reset event for each passthrough ISM device currently in-use */
    QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
        if (pbdev->interp && pbdev->pft == ZPCI_PFT_ISM &&
            pbdev->fh & FH_MASK_ENABLE) {
            s390_pci_kvm_aif_disable(pbdev);

            pci_device_reset(pbdev->pdev);
        }
    }
}

static void s390_pcihost_reset(DeviceState *dev)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
    PCIBus *bus = s->parent_obj.bus;
    S390PCIBusDevice *pbdev, *next;

    /* Process all pending unplug requests */
    QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
        if (pbdev->unplug_requested) {
            if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
                /* Interpreted devices were using interrupt forwarding */
                s390_pci_kvm_aif_disable(pbdev);
            } else if (pbdev->summary_ind) {
                pci_dereg_irqs(pbdev);
            }
            if (pbdev->iommu->enabled) {
                pci_dereg_ioat(pbdev->iommu);
            }
            pbdev->state = ZPCI_FS_STANDBY;
            s390_pci_perform_unplug(pbdev);
        }
    }

    /*
     * When resetting a PCI bridge, the assigned numbers are set to 0. So
     * on every system reset, we also have to reassign numbers.
     */
    s->bus_no = 0;
    pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s);
}

static void s390_pcihost_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    dc->reset = s390_pcihost_reset;
    dc->realize = s390_pcihost_realize;
    dc->unrealize = s390_pcihost_unrealize;
    hc->pre_plug = s390_pcihost_pre_plug;
    hc->plug = s390_pcihost_plug;
    hc->unplug_request = s390_pcihost_unplug_request;
    hc->unplug = s390_pcihost_unplug;
    msi_nonbroken = true;
}

static const TypeInfo s390_pcihost_info = {
    .name = TYPE_S390_PCI_HOST_BRIDGE,
    .parent = TYPE_PCI_HOST_BRIDGE,
    .instance_size = sizeof(S390pciState),
    .class_init = s390_pcihost_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static const TypeInfo s390_pcibus_info = {
    .name = TYPE_S390_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(S390PCIBus),
};

static uint16_t s390_pci_generate_uid(S390pciState *s)
{
    uint16_t uid = 0;

    do {
        uid++;
        if (!s390_pci_find_dev_by_uid(s, uid)) {
            return uid;
        }
    } while (uid < ZPCI_MAX_UID);

    return UID_UNDEFINED;
}

static uint32_t s390_pci_generate_fid(S390pciState *s, Error **errp)
{
    uint32_t fid = 0;

    do {
        if (!s390_pci_find_dev_by_fid(s, fid)) {
            return fid;
        }
    } while (fid++ != ZPCI_MAX_FID);

    error_setg(errp, "no free fid could be found");
    return 0;
}

static void s390_pci_device_realize(DeviceState *dev, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev);
    S390pciState *s = s390_get_phb();

    if (!zpci->target) {
        error_setg(errp, "target must be defined");
        return;
    }

    if (s390_pci_find_dev_by_target(s, zpci->target)) {
        error_setg(errp, "target %s already has an associated zpci device",
                   zpci->target);
        return;
    }

    if (zpci->uid == UID_UNDEFINED) {
        zpci->uid = s390_pci_generate_uid(s);
        if (!zpci->uid) {
            error_setg(errp, "no free uid could be found");
            return;
        }
    } else if (s390_pci_find_dev_by_uid(s, zpci->uid)) {
        error_setg(errp, "uid %u already in use", zpci->uid);
        return;
    }

    if (!zpci->fid_defined) {
        Error *local_error = NULL;

        zpci->fid = s390_pci_generate_fid(s, &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            return;
        }
    } else if (s390_pci_find_dev_by_fid(s, zpci->fid)) {
        error_setg(errp, "fid %u already in use", zpci->fid);
        return;
    }

    zpci->state = ZPCI_FS_RESERVED;
    zpci->fmb.format = ZPCI_FMB_FORMAT;
}

static void s390_pci_device_reset(DeviceState *dev)
{
    S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        return;
    case ZPCI_FS_STANDBY:
        break;
    default:
        pbdev->fh &= ~FH_MASK_ENABLE;
        pbdev->state = ZPCI_FS_DISABLED;
        break;
    }

    if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
        /* Interpreted devices were using interrupt forwarding */
        s390_pci_kvm_aif_disable(pbdev);
    } else if (pbdev->summary_ind) {
        pci_dereg_irqs(pbdev);
    }
    if (pbdev->iommu->enabled) {
        pci_dereg_ioat(pbdev->iommu);
    }

    fmb_timer_free(pbdev);
}

static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    visit_type_uint32(v, name, ptr, errp);
}

static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj);
    Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    if (!visit_type_uint32(v, name, ptr, errp)) {
        return;
    }
    zpci->fid_defined = true;
}

static const PropertyInfo s390_pci_fid_propinfo = {
    .name = "zpci_fid",
    .get = s390_pci_get_fid,
    .set = s390_pci_set_fid,
};

#define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \
    DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t)

static Property s390_pci_device_properties[] = {
    DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED),
    DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid),
    DEFINE_PROP_STRING("target", S390PCIBusDevice, target),
    DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true),
    DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice, forwarding_assist,
                     true),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription s390_pci_device_vmstate = {
    .name = TYPE_S390_PCI_DEVICE,
    /*
     * TODO: add state handling here, so migration works at least with
     * emulated pci devices on s390x
     */
    .unmigratable = 1,
};

static void s390_pci_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "zpci device";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->reset = s390_pci_device_reset;
    dc->bus_type = TYPE_S390_PCI_BUS;
    dc->realize = s390_pci_device_realize;
    device_class_set_props(dc, s390_pci_device_properties);
    dc->vmsd = &s390_pci_device_vmstate;
}

static const TypeInfo s390_pci_device_info = {
    .name = TYPE_S390_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(S390PCIBusDevice),
    .class_init = s390_pci_device_class_init,
};

static const TypeInfo s390_pci_iommu_info = {
    .name = TYPE_S390_PCI_IOMMU,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(S390PCIIOMMU),
};

static void s390_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = s390_translate_iommu;
    imrc->replay = s390_pci_iommu_replay;
}

static const TypeInfo s390_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_S390_IOMMU_MEMORY_REGION,
    .class_init = s390_iommu_memory_region_class_init,
};

static void s390_pci_register_types(void)
{
    type_register_static(&s390_pcihost_info);
    type_register_static(&s390_pcibus_info);
    type_register_static(&s390_pci_device_info);
    type_register_static(&s390_pci_iommu_info);
    type_register_static(&s390_iommu_memory_region_info);
}

type_init(s390_pci_register_types)