hw/ppc/spapr_pci_vfio.c
/*
 * QEMU sPAPR PCI host for VFIO
 *
 * Copyright (c) 2011-2014 Alexey Kardashevskiy, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "hw/ppc/spapr.h"
#include "hw/pci-host/spapr.h"
#include "hw/pci/msix.h"
#include "linux/vfio.h"
#include "hw/vfio/vfio.h"

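/*
 * The "iommu" property carries the host IOMMU group ID this PHB is
 * bound to; it defaults to -1 (unset), which finish_realize below
 * rejects.
 */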
static Property spapr_phb_vfio_properties[] = {
    DEFINE_PROP_INT32("iommu", sPAPRPHBVFIOState, iommugroupid, -1),
    DEFINE_PROP_END_OF_LIST(),
};

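/*
 * Finishes realization of a VFIO-backed PHB: checks that the VFIO
 * container supports the sPAPR TCE IOMMU extension, queries the
 * default 32-bit DMA window geometry from the host and wires a
 * matching TCE table into the PHB's IOMMU address space.
 */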
static void spapr_phb_vfio_finish_realize(sPAPRPHBState *sphb, Error **errp)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
    int ret;
    sPAPRTCETable *tcet;
    uint32_t liobn = svphb->phb.dma_liobn;

    if (svphb->iommugroupid == -1) {
        error_setg(errp, "Wrong IOMMU group ID %d", svphb->iommugroupid);
        return;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_CHECK_EXTENSION,
                               (void *) VFIO_SPAPR_TCE_IOMMU);
    if (ret != 1) {
        error_setg_errno(errp, -ret,
                         "spapr-vfio: SPAPR extension is not supported");
        return;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "spapr-vfio: get info from container failed");
        return;
    }

    tcet = spapr_tce_new_table(DEVICE(sphb), liobn, info.dma32_window_start,
                               SPAPR_TCE_PAGE_SHIFT,
                               info.dma32_window_size >> SPAPR_TCE_PAGE_SHIFT,
                               true);
    if (!tcet) {
        error_setg(errp, "spapr-vfio: failed to create VFIO TCE table");
        return;
    }

    /* Register default 32bit DMA window */
    memory_region_add_subregion(&sphb->iommu_root, tcet->bus_offset,
                                spapr_tce_get_iommu(tcet));
}

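/* Ask the host to (re)enable EEH on the PE backing this PHB. */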
static void spapr_phb_vfio_eeh_reenable(sPAPRPHBVFIOState *svphb)
{
    struct vfio_eeh_pe_op op = {
        .argsz = sizeof(op),
        .op = VFIO_EEH_PE_ENABLE
    };

    vfio_container_ioctl(&svphb->phb.iommu_as,
                         svphb->iommugroupid, VFIO_EEH_PE_OP, &op);
}

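/* Device reset handler (installed as dc->reset in the class init below). */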
static void spapr_phb_vfio_reset(DeviceState *qdev)
{
    /*
     * The PE might be in a frozen state. Re-enabling EEH
     * functionality on it clears the frozen state, which ensures
     * that the contained PCI devices will work properly after
     * reboot.
     */
    spapr_phb_vfio_eeh_reenable(SPAPR_PCI_VFIO_HOST_BRIDGE(qdev));
}

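/*
 * Translates an RTAS EEH option into the corresponding VFIO EEH PE
 * operation and forwards it to the host through the container ioctl.
 * For RTAS_EEH_ENABLE the PCI device address is validated first,
 * since EEH is enabled per PCI device rather than per PE.
 */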
static int spapr_phb_vfio_eeh_set_option(sPAPRPHBState *sphb,
                                         unsigned int addr, int option)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    switch (option) {
    case RTAS_EEH_DISABLE:
        op.op = VFIO_EEH_PE_DISABLE;
        break;
    case RTAS_EEH_ENABLE: {
        PCIHostState *phb;
        PCIDevice *pdev;

        /*
         * EEH functionality is enabled on a per PCI device basis
         * rather than per PE. We need to check the validity of the
         * PCI device address.
         */
        phb = PCI_HOST_BRIDGE(sphb);
        pdev = pci_find_device(phb->bus,
                               (addr >> 16) & 0xFF, (addr >> 8) & 0xFF);
        if (!pdev || !object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
            return RTAS_OUT_PARAM_ERROR;
        }

        op.op = VFIO_EEH_PE_ENABLE;
        break;
    }
    case RTAS_EEH_THAW_IO:
        op.op = VFIO_EEH_PE_UNFREEZE_IO;
        break;
    case RTAS_EEH_THAW_DMA:
        op.op = VFIO_EEH_PE_UNFREEZE_DMA;
        break;
    default:
        return RTAS_OUT_PARAM_ERROR;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_HW_ERROR;
    }

    return RTAS_OUT_SUCCESS;
}

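/*
 * Queries the current EEH PE state; the ioctl's non-negative return
 * value is the state handed back to the caller.
 */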
static int spapr_phb_vfio_eeh_get_state(sPAPRPHBState *sphb, int *state)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    op.op = VFIO_EEH_PE_GET_STATE;
    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_PARAM_ERROR;
    }

    *state = ret;
    return RTAS_OUT_SUCCESS;
}

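/*
 * Per-device helper for the pre-reset MSI-X cleanup, applied to each
 * vfio-pci device below the PHB.
 */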
static void spapr_phb_vfio_eeh_clear_dev_msix(PCIBus *bus,
                                              PCIDevice *pdev,
                                              void *opaque)
{
    /* Check if the device is a VFIO PCI device */
    if (!object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
        return;
    }

    /*
     * The MSI-X table will be cleaned out by reset. We need to
     * disable it so that it can be re-enabled properly. Also, the
     * cached MSI-X table should be cleared as it no longer reflects
     * the contents in hardware.
     */
    if (msix_enabled(pdev)) {
        uint16_t flags;

        flags = pci_host_config_read_common(pdev,
                                            pdev->msix_cap + PCI_MSIX_FLAGS,
                                            pci_config_size(pdev), 2);
        flags &= ~PCI_MSIX_FLAGS_ENABLE;
        pci_host_config_write_common(pdev,
                                     pdev->msix_cap + PCI_MSIX_FLAGS,
                                     pci_config_size(pdev), flags, 2);
    }

    msix_reset(pdev);
}

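/* Clear MSI-X state on every device of a single bus. */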
static void spapr_phb_vfio_eeh_clear_bus_msix(PCIBus *bus, void *opaque)
{
    pci_for_each_device(bus, pci_bus_num(bus),
                        spapr_phb_vfio_eeh_clear_dev_msix, NULL);
}

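/* Clear MSI-X state on all devices under this PHB before resetting the PE. */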
static void spapr_phb_vfio_eeh_pre_reset(sPAPRPHBState *sphb)
{
    PCIHostState *phb = PCI_HOST_BRIDGE(sphb);

    pci_for_each_bus(phb->bus, spapr_phb_vfio_eeh_clear_bus_msix, NULL);
}

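/*
 * Maps the RTAS slot reset option onto the matching VFIO EEH PE reset
 * operation; hot and fundamental resets clear MSI-X state on the
 * affected devices first.
 */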
static int spapr_phb_vfio_eeh_reset(sPAPRPHBState *sphb, int option)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    switch (option) {
    case RTAS_SLOT_RESET_DEACTIVATE:
        op.op = VFIO_EEH_PE_RESET_DEACTIVATE;
        break;
    case RTAS_SLOT_RESET_HOT:
        spapr_phb_vfio_eeh_pre_reset(sphb);
        op.op = VFIO_EEH_PE_RESET_HOT;
        break;
    case RTAS_SLOT_RESET_FUNDAMENTAL:
        spapr_phb_vfio_eeh_pre_reset(sphb);
        op.op = VFIO_EEH_PE_RESET_FUNDAMENTAL;
        break;
    default:
        return RTAS_OUT_PARAM_ERROR;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_HW_ERROR;
    }

    return RTAS_OUT_SUCCESS;
}

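/* Ask the host to reconfigure the PE (VFIO_EEH_PE_CONFIGURE) after a reset. */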
static int spapr_phb_vfio_eeh_configure(sPAPRPHBState *sphb)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    op.op = VFIO_EEH_PE_CONFIGURE;
    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_PARAM_ERROR;
    }

    return RTAS_OUT_SUCCESS;
}

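/*
 * Hooks the VFIO-specific realize, reset and EEH callbacks into the
 * generic sPAPR PHB class.
 */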
static void spapr_phb_vfio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass);

    dc->props = spapr_phb_vfio_properties;
    dc->reset = spapr_phb_vfio_reset;
    spc->finish_realize = spapr_phb_vfio_finish_realize;
    spc->eeh_set_option = spapr_phb_vfio_eeh_set_option;
    spc->eeh_get_state = spapr_phb_vfio_eeh_get_state;
    spc->eeh_reset = spapr_phb_vfio_eeh_reset;
    spc->eeh_configure = spapr_phb_vfio_eeh_configure;
}

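/*
 * QOM boilerplate: registers the VFIO PHB as a subtype of the generic
 * sPAPR PCI host bridge.
 */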
static const TypeInfo spapr_phb_vfio_info = {
    .name          = TYPE_SPAPR_PCI_VFIO_HOST_BRIDGE,
    .parent        = TYPE_SPAPR_PCI_HOST_BRIDGE,
    .instance_size = sizeof(sPAPRPHBVFIOState),
    .class_init    = spapr_phb_vfio_class_init,
    .class_size    = sizeof(sPAPRPHBClass),
};

static void spapr_pci_vfio_register_types(void)
{
    type_register_static(&spapr_phb_vfio_info);
}

type_init(spapr_pci_vfio_register_types)