/*
 * QEMU sPAPR PCI host for VFIO
 *
 * Copyright (c) 2011-2014 Alexey Kardashevskiy, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/ppc/spapr.h"
#include "hw/pci-host/spapr.h"
#include "hw/pci/msix.h"
#include "linux/vfio.h"
#include "hw/vfio/vfio.h"

static Property spapr_phb_vfio_properties[] = {
    DEFINE_PROP_INT32("iommu", sPAPRPHBVFIOState, iommugroupid, -1),
    DEFINE_PROP_END_OF_LIST(),
};

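/*
 * finish_realize hook for the VFIO flavour of the sPAPR PHB: verify that
 * the host IOMMU group supports the sPAPR TCE interface, query the default
 * 32-bit DMA window from the VFIO container, and expose that window to the
 * guest as a TCE table under the PHB's LIOBN.
 */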
static void spapr_phb_vfio_finish_realize(sPAPRPHBState *sphb, Error **errp)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
    int ret;
    sPAPRTCETable *tcet;
    uint32_t liobn = svphb->phb.dma_liobn;

    if (svphb->iommugroupid == -1) {
        error_setg(errp, "Wrong IOMMU group ID %d", svphb->iommugroupid);
        return;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_CHECK_EXTENSION,
                               (void *) VFIO_SPAPR_TCE_IOMMU);
    if (ret != 1) {
        error_setg_errno(errp, -ret,
                         "spapr-vfio: SPAPR extension is not supported");
        return;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "spapr-vfio: get info from container failed");
        return;
    }

    tcet = spapr_tce_new_table(DEVICE(sphb), liobn, info.dma32_window_start,
                               SPAPR_TCE_PAGE_SHIFT,
                               info.dma32_window_size >> SPAPR_TCE_PAGE_SHIFT,
                               true);
    if (!tcet) {
        error_setg(errp, "spapr-vfio: failed to create VFIO TCE table");
        return;
    }

    /* Register default 32bit DMA window */
    memory_region_add_subregion(&sphb->iommu_root, tcet->bus_offset,
                                spapr_tce_get_iommu(tcet));
}

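/*
 * Issue VFIO_EEH_PE_ENABLE for the PE backing this PHB. Used from the
 * reset handler below to clear a possibly frozen PE state.
 */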
static void spapr_phb_vfio_eeh_reenable(sPAPRPHBVFIOState *svphb)
{
    struct vfio_eeh_pe_op op = {
        .argsz = sizeof(op),
        .op = VFIO_EEH_PE_ENABLE
    };

    vfio_container_ioctl(&svphb->phb.iommu_as,
                         svphb->iommugroupid, VFIO_EEH_PE_OP, &op);
}

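/* DeviceClass reset handler for the VFIO host bridge. */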
static void spapr_phb_vfio_reset(DeviceState *qdev)
{
    /*
     * The PE might be in a frozen state. Re-enabling EEH on it clears
     * the frozen state, which ensures that the contained PCI devices
     * will work properly after reboot.
     */
    spapr_phb_vfio_eeh_reenable(SPAPR_PCI_VFIO_HOST_BRIDGE(qdev));
}

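/*
 * Handle the RTAS EEH "set option" request: translate the RTAS option
 * value into the matching VFIO_EEH_PE_* operation and forward it to the
 * VFIO container that owns the IOMMU group.
 */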
static int spapr_phb_vfio_eeh_set_option(sPAPRPHBState *sphb,
                                         unsigned int addr, int option)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    switch (option) {
    case RTAS_EEH_DISABLE:
        op.op = VFIO_EEH_PE_DISABLE;
        break;
    case RTAS_EEH_ENABLE: {
        PCIHostState *phb;
        PCIDevice *pdev;

        /*
         * EEH is enabled on the basis of the PCI device rather than
         * the PE, so the PCI device address has to be validated.
         */
        phb = PCI_HOST_BRIDGE(sphb);
        pdev = pci_find_device(phb->bus,
                               (addr >> 16) & 0xFF, (addr >> 8) & 0xFF);
        if (!pdev || !object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
            return RTAS_OUT_PARAM_ERROR;
        }

        op.op = VFIO_EEH_PE_ENABLE;
        break;
    }
    case RTAS_EEH_THAW_IO:
        op.op = VFIO_EEH_PE_UNFREEZE_IO;
        break;
    case RTAS_EEH_THAW_DMA:
        op.op = VFIO_EEH_PE_UNFREEZE_DMA;
        break;
    default:
        return RTAS_OUT_PARAM_ERROR;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_HW_ERROR;
    }

    return RTAS_OUT_SUCCESS;
}

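/*
 * Query the current PE state from the host via VFIO_EEH_PE_GET_STATE;
 * the ioctl's non-negative return value is the state handed back to
 * the RTAS caller.
 */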
static int spapr_phb_vfio_eeh_get_state(sPAPRPHBState *sphb, int *state)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    op.op = VFIO_EEH_PE_GET_STATE;
    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_PARAM_ERROR;
    }

    *state = ret;
    return RTAS_OUT_SUCCESS;
}

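/*
 * Per-device callback used before a hot or fundamental PE reset: for a
 * VFIO PCI device, disable MSI-X in config space and drop QEMU's cached
 * MSI-X state so it can be re-enabled cleanly after the reset.
 */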
static void spapr_phb_vfio_eeh_clear_dev_msix(PCIBus *bus,
                                              PCIDevice *pdev,
                                              void *opaque)
{
    /* Check if the device is a VFIO PCI device */
    if (!object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
        return;
    }

    /*
     * The MSI-X table will be cleaned out by the reset. Disable MSI-X
     * here so that it can be re-enabled properly afterwards. Also clear
     * the cached MSI-X table, as it no longer reflects the contents of
     * the hardware.
     */
    if (msix_enabled(pdev)) {
        uint16_t flags;

        flags = pci_host_config_read_common(pdev,
                                            pdev->msix_cap + PCI_MSIX_FLAGS,
                                            pci_config_size(pdev), 2);
        flags &= ~PCI_MSIX_FLAGS_ENABLE;
        pci_host_config_write_common(pdev,
                                     pdev->msix_cap + PCI_MSIX_FLAGS,
                                     pci_config_size(pdev), flags, 2);
    }

    msix_reset(pdev);
}

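/* Clear MSI-X state on every device attached to this bus. */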
static void spapr_phb_vfio_eeh_clear_bus_msix(PCIBus *bus, void *opaque)
{
    pci_for_each_device(bus, pci_bus_num(bus),
                        spapr_phb_vfio_eeh_clear_dev_msix, NULL);
}

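/*
 * Walk every bus below the PHB and clear MSI-X state on the VFIO
 * devices found there before the PE is reset.
 */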
static void spapr_phb_vfio_eeh_pre_reset(sPAPRPHBState *sphb)
{
    PCIHostState *phb = PCI_HOST_BRIDGE(sphb);

    pci_for_each_bus(phb->bus, spapr_phb_vfio_eeh_clear_bus_msix, NULL);
}

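/*
 * Handle the RTAS slot reset request: map the reset option onto the
 * corresponding VFIO EEH reset operation. Hot and fundamental resets
 * first clear MSI-X state on the affected devices.
 */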
static int spapr_phb_vfio_eeh_reset(sPAPRPHBState *sphb, int option)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    switch (option) {
    case RTAS_SLOT_RESET_DEACTIVATE:
        op.op = VFIO_EEH_PE_RESET_DEACTIVATE;
        break;
    case RTAS_SLOT_RESET_HOT:
        spapr_phb_vfio_eeh_pre_reset(sphb);
        op.op = VFIO_EEH_PE_RESET_HOT;
        break;
    case RTAS_SLOT_RESET_FUNDAMENTAL:
        spapr_phb_vfio_eeh_pre_reset(sphb);
        op.op = VFIO_EEH_PE_RESET_FUNDAMENTAL;
        break;
    default:
        return RTAS_OUT_PARAM_ERROR;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_HW_ERROR;
    }

    return RTAS_OUT_SUCCESS;
}

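/*
 * Ask the host to reconfigure the PE (VFIO_EEH_PE_CONFIGURE), restoring
 * the bridge configuration within the PE after a reset.
 */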
static int spapr_phb_vfio_eeh_configure(sPAPRPHBState *sphb)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    op.op = VFIO_EEH_PE_CONFIGURE;
    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_PARAM_ERROR;
    }

    return RTAS_OUT_SUCCESS;
}

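/*
 * Wire the VFIO-specific implementations into the sPAPR PHB class:
 * property list, reset handler and the realize/EEH hooks used by the
 * generic sPAPR PCI code.
 */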
static void spapr_phb_vfio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass);

    dc->props = spapr_phb_vfio_properties;
    dc->reset = spapr_phb_vfio_reset;
    spc->finish_realize = spapr_phb_vfio_finish_realize;
    spc->eeh_set_option = spapr_phb_vfio_eeh_set_option;
    spc->eeh_get_state = spapr_phb_vfio_eeh_get_state;
    spc->eeh_reset = spapr_phb_vfio_eeh_reset;
    spc->eeh_configure = spapr_phb_vfio_eeh_configure;
}

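/* QOM registration: the VFIO PHB subclasses TYPE_SPAPR_PCI_HOST_BRIDGE. */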
static const TypeInfo spapr_phb_vfio_info = {
    .name          = TYPE_SPAPR_PCI_VFIO_HOST_BRIDGE,
    .parent        = TYPE_SPAPR_PCI_HOST_BRIDGE,
    .instance_size = sizeof(sPAPRPHBVFIOState),
    .class_init    = spapr_phb_vfio_class_init,
    .class_size    = sizeof(sPAPRPHBClass),
};

static void spapr_pci_vfio_register_types(void)
{
    type_register_static(&spapr_phb_vfio_info);
}

type_init(spapr_pci_vfio_register_types)