vfio/ccw: Allow the selection of a given iommu backend
[qemu/ar7.git] / hw / vfio / ccw.c
blobd2d58bb677cfad38a6965ea6a43783256407207b
1 /*
2 * vfio based subchannel assignment support
4 * Copyright 2017 IBM Corp.
5 * Copyright 2019 Red Hat, Inc.
7 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
8 * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
9 * Pierre Morel <pmorel@linux.vnet.ibm.com>
10 * Cornelia Huck <cohuck@redhat.com>
12 * This work is licensed under the terms of the GNU GPL, version 2 or (at
13 * your option) any later version. See the COPYING file in the top-level
14 * directory.
17 #include "qemu/osdep.h"
18 #include CONFIG_DEVICES /* CONFIG_IOMMUFD */
19 #include <linux/vfio.h>
20 #include <linux/vfio_ccw.h>
21 #include <sys/ioctl.h>
23 #include "qapi/error.h"
24 #include "hw/vfio/vfio-common.h"
25 #include "sysemu/iommufd.h"
26 #include "hw/s390x/s390-ccw.h"
27 #include "hw/s390x/vfio-ccw.h"
28 #include "hw/qdev-properties.h"
29 #include "hw/s390x/ccw-device.h"
30 #include "exec/address-spaces.h"
31 #include "qemu/error-report.h"
32 #include "qemu/main-loop.h"
33 #include "qemu/module.h"
struct VFIOCCWDevice {
    S390CCWDevice cdev;     /* parent: emulated s390 subchannel device */
    VFIODevice vdev;        /* generic VFIO device state (fd, name, ...) */
    /* Mandatory I/O region: relays channel programs to the host driver. */
    uint64_t io_region_size;
    uint64_t io_region_offset;
    struct ccw_io_region *io_region;
    /* Optional region for async commands (CSCH/HSCH). */
    uint64_t async_cmd_region_size;
    uint64_t async_cmd_region_offset;
    struct ccw_cmd_region *async_cmd_region;
    /* Optional region used to refresh path state on store subchannel. */
    uint64_t schib_region_size;
    uint64_t schib_region_offset;
    struct ccw_schib_region *schib_region;
    /* Optional region for reading channel report words. */
    uint64_t crw_region_size;
    uint64_t crw_region_offset;
    struct ccw_crw_region *crw_region;
    EventNotifier io_notifier;   /* signalled on I/O interrupt */
    EventNotifier crw_notifier;  /* signalled when CRWs are pending */
    EventNotifier req_notifier;  /* host requests the device back */
    bool force_orb_pfch;         /* "force-orb-pfch" property */
    bool warned_orb_pfch;        /* warn_report_once_cond() state */
};
/*
 * Emit a warning about ORB prefetch handling for this device, at most
 * once per device (tracked via vcdev->warned_orb_pfch).
 */
static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch,
                                  const char *msg)
{
    warn_report_once_cond(&vcdev->warned_orb_pfch,
                          "vfio-ccw (devno %x.%x.%04x): %s",
                          sch->cssid, sch->ssid, sch->devno, msg);
}
/*
 * VFIODeviceOps callback: vfio-ccw devices never require a bus-level
 * reset, so the flag is unconditionally cleared.
 */
static void vfio_ccw_compute_needs_reset(VFIODevice *vdev)
{
    vdev->needs_reset = false;
}
/*
 * We don't need vfio_hot_reset_multi and vfio_eoi operations for
 * vfio_ccw device now.
 */
struct VFIODeviceOps vfio_ccw_ops = {
    .vfio_compute_needs_reset = vfio_ccw_compute_needs_reset,
};
78 static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
80 VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
81 struct ccw_io_region *region = vcdev->io_region;
82 int ret;
84 if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) {
85 sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH;
86 warn_once_pfch(vcdev, sch, "PFCH flag forced");
89 QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB));
90 QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW));
91 QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB));
93 memset(region, 0, sizeof(*region));
95 memcpy(region->orb_area, &sch->orb, sizeof(ORB));
96 memcpy(region->scsw_area, &sch->curr_status.scsw, sizeof(SCSW));
98 again:
99 ret = pwrite(vcdev->vdev.fd, region,
100 vcdev->io_region_size, vcdev->io_region_offset);
101 if (ret != vcdev->io_region_size) {
102 if (errno == EAGAIN) {
103 goto again;
105 error_report("vfio-ccw: write I/O region failed with errno=%d", errno);
106 ret = errno ? -errno : -EFAULT;
107 } else {
108 ret = 0;
110 switch (ret) {
111 case 0:
112 return IOINST_CC_EXPECTED;
113 case -EBUSY:
114 return IOINST_CC_BUSY;
115 case -ENODEV:
116 case -EACCES:
117 return IOINST_CC_NOT_OPERATIONAL;
118 case -EFAULT:
119 default:
120 sch_gen_unit_exception(sch);
121 css_inject_io_interrupt(sch);
122 return IOINST_CC_EXPECTED;
126 static IOInstEnding vfio_ccw_handle_store(SubchDev *sch)
128 VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
129 SCHIB *schib = &sch->curr_status;
130 struct ccw_schib_region *region = vcdev->schib_region;
131 SCHIB *s;
132 int ret;
134 /* schib region not available so nothing else to do */
135 if (!region) {
136 return IOINST_CC_EXPECTED;
139 memset(region, 0, sizeof(*region));
140 ret = pread(vcdev->vdev.fd, region, vcdev->schib_region_size,
141 vcdev->schib_region_offset);
143 if (ret == -1) {
145 * Device is probably damaged, but store subchannel does not
146 * have a nonzero cc defined for this scenario. Log an error,
147 * and presume things are otherwise fine.
149 error_report("vfio-ccw: store region read failed with errno=%d", errno);
150 return IOINST_CC_EXPECTED;
154 * Selectively copy path-related bits of the SCHIB,
155 * rather than copying the entire struct.
157 s = (SCHIB *)region->schib_area;
158 schib->pmcw.pnom = s->pmcw.pnom;
159 schib->pmcw.lpum = s->pmcw.lpum;
160 schib->pmcw.pam = s->pmcw.pam;
161 schib->pmcw.pom = s->pmcw.pom;
163 if (s->scsw.flags & SCSW_FLAGS_MASK_PNO) {
164 schib->scsw.flags |= SCSW_FLAGS_MASK_PNO;
167 return IOINST_CC_EXPECTED;
170 static int vfio_ccw_handle_clear(SubchDev *sch)
172 VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
173 struct ccw_cmd_region *region = vcdev->async_cmd_region;
174 int ret;
176 if (!vcdev->async_cmd_region) {
177 /* Async command region not available, fall back to emulation */
178 return -ENOSYS;
181 memset(region, 0, sizeof(*region));
182 region->command = VFIO_CCW_ASYNC_CMD_CSCH;
184 again:
185 ret = pwrite(vcdev->vdev.fd, region,
186 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
187 if (ret != vcdev->async_cmd_region_size) {
188 if (errno == EAGAIN) {
189 goto again;
191 error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
192 ret = errno ? -errno : -EFAULT;
193 } else {
194 ret = 0;
196 switch (ret) {
197 case 0:
198 case -ENODEV:
199 case -EACCES:
200 return ret;
201 case -EFAULT:
202 default:
203 sch_gen_unit_exception(sch);
204 css_inject_io_interrupt(sch);
205 return 0;
209 static int vfio_ccw_handle_halt(SubchDev *sch)
211 VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
212 struct ccw_cmd_region *region = vcdev->async_cmd_region;
213 int ret;
215 if (!vcdev->async_cmd_region) {
216 /* Async command region not available, fall back to emulation */
217 return -ENOSYS;
220 memset(region, 0, sizeof(*region));
221 region->command = VFIO_CCW_ASYNC_CMD_HSCH;
223 again:
224 ret = pwrite(vcdev->vdev.fd, region,
225 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
226 if (ret != vcdev->async_cmd_region_size) {
227 if (errno == EAGAIN) {
228 goto again;
230 error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
231 ret = errno ? -errno : -EFAULT;
232 } else {
233 ret = 0;
235 switch (ret) {
236 case 0:
237 case -EBUSY:
238 case -ENODEV:
239 case -EACCES:
240 return ret;
241 case -EFAULT:
242 default:
243 sch_gen_unit_exception(sch);
244 css_inject_io_interrupt(sch);
245 return 0;
/*
 * DeviceClass reset handler: ask the host driver to reset the device.
 * The ioctl's return value is not checked; nothing useful can be done
 * about a failure from this context.
 */
static void vfio_ccw_reset(DeviceState *dev)
{
    VFIOCCWDevice *vcdev = VFIO_CCW(dev);

    ioctl(vcdev->vdev.fd, VFIO_DEVICE_RESET);
}
256 static void vfio_ccw_crw_read(VFIOCCWDevice *vcdev)
258 struct ccw_crw_region *region = vcdev->crw_region;
259 CRW crw;
260 int size;
262 /* Keep reading CRWs as long as data is returned */
263 do {
264 memset(region, 0, sizeof(*region));
265 size = pread(vcdev->vdev.fd, region, vcdev->crw_region_size,
266 vcdev->crw_region_offset);
268 if (size == -1) {
269 error_report("vfio-ccw: Read crw region failed with errno=%d",
270 errno);
271 break;
274 if (region->crw == 0) {
275 /* No more CRWs to queue */
276 break;
279 memcpy(&crw, &region->crw, sizeof(CRW));
281 css_crw_add_to_queue(crw);
282 } while (1);
285 static void vfio_ccw_req_notifier_handler(void *opaque)
287 VFIOCCWDevice *vcdev = opaque;
288 Error *err = NULL;
290 if (!event_notifier_test_and_clear(&vcdev->req_notifier)) {
291 return;
294 qdev_unplug(DEVICE(vcdev), &err);
295 if (err) {
296 warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
/*
 * CRW notifier handler: drain pending CRWs each time the host signals
 * that channel reports are available.
 */
static void vfio_ccw_crw_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;

    while (event_notifier_test_and_clear(&vcdev->crw_notifier)) {
        vfio_ccw_crw_read(vcdev);
    }
}
/*
 * I/O interrupt handler: invoked when the host signals completion of a
 * channel program. Reads the IRB from the I/O region, updates the
 * emulated subchannel status accordingly, and injects an I/O interrupt.
 * Read failures are mapped onto the closest architectural error
 * condition before the interrupt is injected.
 */
static void vfio_ccw_io_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;
    struct ccw_io_region *region = vcdev->io_region;
    CcwDevice *ccw_dev = CCW_DEVICE(vcdev);
    SubchDev *sch = ccw_dev->sch;
    SCHIB *schib = &sch->curr_status;
    SCSW s;
    IRB irb;
    ESW esw;
    int size;

    if (!event_notifier_test_and_clear(&vcdev->io_notifier)) {
        return;
    }

    size = pread(vcdev->vdev.fd, region, vcdev->io_region_size,
                 vcdev->io_region_offset);
    if (size == -1) {
        switch (errno) {
        case ENODEV:
            /* Generate a deferred cc 3 condition. */
            schib->scsw.flags |= SCSW_FLAGS_MASK_CC;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            goto read_err;
        case EFAULT:
            /* Memory problem, generate channel data check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        default:
            /* Error, generate channel program check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        }
    } else if (size != vcdev->io_region_size) {
        /* Information transfer error, generate channel-control check. */
        schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
        schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK;
        schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                            SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        goto read_err;
    }

    memcpy(&irb, region->irb_area, sizeof(IRB));

    /* Update control block via irb. */
    s = schib->scsw;
    copy_scsw_to_guest(&s, &irb.scsw);
    schib->scsw = s;

    copy_esw_to_guest(&esw, &irb.esw);
    sch->esw = esw;

    /* If a unit check is pending, copy sense data. */
    if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) &&
        (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) {
        memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw));
    }

read_err:
    css_inject_io_interrupt(sch);
}
382 static void vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
383 unsigned int irq,
384 Error **errp)
386 VFIODevice *vdev = &vcdev->vdev;
387 struct vfio_irq_info *irq_info;
388 size_t argsz;
389 int fd;
390 EventNotifier *notifier;
391 IOHandler *fd_read;
393 switch (irq) {
394 case VFIO_CCW_IO_IRQ_INDEX:
395 notifier = &vcdev->io_notifier;
396 fd_read = vfio_ccw_io_notifier_handler;
397 break;
398 case VFIO_CCW_CRW_IRQ_INDEX:
399 notifier = &vcdev->crw_notifier;
400 fd_read = vfio_ccw_crw_notifier_handler;
401 break;
402 case VFIO_CCW_REQ_IRQ_INDEX:
403 notifier = &vcdev->req_notifier;
404 fd_read = vfio_ccw_req_notifier_handler;
405 break;
406 default:
407 error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
408 return;
411 if (vdev->num_irqs < irq + 1) {
412 error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)",
413 irq, vdev->num_irqs);
414 return;
417 argsz = sizeof(*irq_info);
418 irq_info = g_malloc0(argsz);
419 irq_info->index = irq;
420 irq_info->argsz = argsz;
421 if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
422 irq_info) < 0 || irq_info->count < 1) {
423 error_setg_errno(errp, errno, "vfio: Error getting irq info");
424 goto out_free_info;
427 if (event_notifier_init(notifier, 0)) {
428 error_setg_errno(errp, errno,
429 "vfio: Unable to init event notifier for irq (%d)",
430 irq);
431 goto out_free_info;
434 fd = event_notifier_get_fd(notifier);
435 qemu_set_fd_handler(fd, fd_read, NULL, vcdev);
437 if (vfio_set_irq_signaling(vdev, irq, 0,
438 VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
439 qemu_set_fd_handler(fd, NULL, NULL, vcdev);
440 event_notifier_cleanup(notifier);
443 out_free_info:
444 g_free(irq_info);
/*
 * Disable signalling for @irq and detach the QEMU fd handler.
 * A failure to disable the trigger is only warned about, since this is
 * used on teardown paths where nothing better can be done.
 */
static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
                                             unsigned int irq)
{
    Error *err = NULL;
    EventNotifier *notifier;

    switch (irq) {
    case VFIO_CCW_IO_IRQ_INDEX:
        notifier = &vcdev->io_notifier;
        break;
    case VFIO_CCW_CRW_IRQ_INDEX:
        notifier = &vcdev->crw_notifier;
        break;
    case VFIO_CCW_REQ_IRQ_INDEX:
        notifier = &vcdev->req_notifier;
        break;
    default:
        error_report("vfio: Unsupported device irq(%d)", irq);
        return;
    }

    if (vfio_set_irq_signaling(&vcdev->vdev, irq, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
    }

    qemu_set_fd_handler(event_notifier_get_fd(notifier),
                        NULL, NULL, vcdev);
    event_notifier_cleanup(notifier);
}
478 static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
480 VFIODevice *vdev = &vcdev->vdev;
481 struct vfio_region_info *info;
482 int ret;
484 /* Sanity check device */
485 if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) {
486 error_setg(errp, "vfio: Um, this isn't a vfio-ccw device");
487 return;
491 * We always expect at least the I/O region to be present. We also
492 * may have a variable number of regions governed by capabilities.
494 if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) {
495 error_setg(errp, "vfio: too few regions (%u), expected at least %u",
496 vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1);
497 return;
500 ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
501 if (ret) {
502 error_setg_errno(errp, -ret, "vfio: Error getting config info");
503 return;
506 vcdev->io_region_size = info->size;
507 if (sizeof(*vcdev->io_region) != vcdev->io_region_size) {
508 error_setg(errp, "vfio: Unexpected size of the I/O region");
509 goto out_err;
512 vcdev->io_region_offset = info->offset;
513 vcdev->io_region = g_malloc0(info->size);
514 g_free(info);
516 /* check for the optional async command region */
517 ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
518 VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
519 if (!ret) {
520 vcdev->async_cmd_region_size = info->size;
521 if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) {
522 error_setg(errp, "vfio: Unexpected size of the async cmd region");
523 goto out_err;
525 vcdev->async_cmd_region_offset = info->offset;
526 vcdev->async_cmd_region = g_malloc0(info->size);
527 g_free(info);
530 ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
531 VFIO_REGION_SUBTYPE_CCW_SCHIB, &info);
532 if (!ret) {
533 vcdev->schib_region_size = info->size;
534 if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) {
535 error_setg(errp, "vfio: Unexpected size of the schib region");
536 goto out_err;
538 vcdev->schib_region_offset = info->offset;
539 vcdev->schib_region = g_malloc(info->size);
540 g_free(info);
543 ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
544 VFIO_REGION_SUBTYPE_CCW_CRW, &info);
546 if (!ret) {
547 vcdev->crw_region_size = info->size;
548 if (sizeof(*vcdev->crw_region) != vcdev->crw_region_size) {
549 error_setg(errp, "vfio: Unexpected size of the CRW region");
550 goto out_err;
552 vcdev->crw_region_offset = info->offset;
553 vcdev->crw_region = g_malloc(info->size);
554 g_free(info);
557 return;
559 out_err:
560 g_free(vcdev->crw_region);
561 g_free(vcdev->schib_region);
562 g_free(vcdev->async_cmd_region);
563 g_free(vcdev->io_region);
564 g_free(info);
565 return;
568 static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
570 g_free(vcdev->crw_region);
571 g_free(vcdev->schib_region);
572 g_free(vcdev->async_cmd_region);
573 g_free(vcdev->io_region);
/*
 * Realize the vfio-ccw device: realize the subchannel, attach the VFIO
 * device, map its regions and register the IRQ notifiers. On any
 * failure, everything set up so far is unwound in reverse order via the
 * chained error labels.
 */
static void vfio_ccw_realize(DeviceState *dev, Error **errp)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
    VFIODevice *vbasedev = &vcdev->vdev;
    Error *err = NULL;
    int ret;

    /* Call the class init function for subchannel. */
    if (cdc->realize) {
        cdc->realize(cdev, vcdev->vdev.sysfsdev, &err);
        if (err) {
            goto out_err_propagate;
        }
    }

    vbasedev->ops = &vfio_ccw_ops;
    vbasedev->type = VFIO_DEVICE_TYPE_CCW;
    vbasedev->name = g_strdup_printf("%x.%x.%04x", vcdev->cdev.hostid.cssid,
                                     vcdev->cdev.hostid.ssid,
                                     vcdev->cdev.hostid.devid);
    vbasedev->dev = dev;

    /*
     * All vfio-ccw devices are believed to operate in a way compatible with
     * discarding of memory in RAM blocks, ie. pages pinned in the host are
     * in the current working set of the guest driver and therefore never
     * overlap e.g., with pages available to the guest balloon driver. This
     * needs to be set before vfio_get_device() for vfio common to handle
     * ram_block_discard_disable().
     */
    vbasedev->ram_block_discard_allowed = true;

    ret = vfio_attach_device(cdev->mdevid, vbasedev,
                             &address_space_memory, errp);
    if (ret) {
        goto out_attach_dev_err;
    }

    vfio_ccw_get_region(vcdev, &err);
    if (err) {
        goto out_region_err;
    }

    vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, &err);
    if (err) {
        goto out_io_notifier_err;
    }

    /* The CRW IRQ is only meaningful if the host exposed a CRW region. */
    if (vcdev->crw_region) {
        vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX, &err);
        if (err) {
            goto out_irq_notifier_err;
        }
    }

    vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX, &err);
    if (err) {
        /*
         * Report this error, but do not make it a failing condition.
         * Lack of this IRQ in the host does not prevent normal operation.
         */
        error_report_err(err);
    }

    return;

out_irq_notifier_err:
    /*
     * NOTE(review): this path also unregisters the REQ IRQ even though it
     * has not been registered yet at this point — verify that unregister
     * tolerates an uninitialized notifier.
     */
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
out_io_notifier_err:
    vfio_ccw_put_region(vcdev);
out_region_err:
    vfio_detach_device(vbasedev);
out_attach_dev_err:
    g_free(vbasedev->name);
    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
out_err_propagate:
    error_propagate(errp, err);
}
/*
 * Undo vfio_ccw_realize() in reverse order: IRQ notifiers first, then
 * the region buffers, then detach the VFIO device and unrealize the
 * subchannel.
 */
static void vfio_ccw_unrealize(DeviceState *dev)
{
    S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
    VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);

    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX);
    /*
     * NOTE(review): the CRW IRQ is unregistered unconditionally even when
     * no CRW region existed and it was never registered — confirm that
     * unregister handles a never-initialized notifier.
     */
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
    vfio_ccw_put_region(vcdev);
    vfio_detach_device(&vcdev->vdev);
    g_free(vcdev->vdev.name);

    /* Let the subchannel class undo its part of realize. */
    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
}
static Property vfio_ccw_properties[] = {
    /* sysfs path of the mediated device to bind to. */
    DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
    /* Force the ORB prefetch bit (see vfio_ccw_handle_request()). */
    DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
#ifdef CONFIG_IOMMUFD
    /* Optional link to an iommufd backend object selecting that backend. */
    DEFINE_PROP_LINK("iommufd", VFIOCCWDevice, vdev.iommufd,
                     TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
    DEFINE_PROP_END_OF_LIST(),
};
/* vfio-ccw devices carry host state and cannot be migrated. */
static const VMStateDescription vfio_ccw_vmstate = {
    .name = "vfio-ccw",
    .unmigratable = 1,
};
694 static void vfio_ccw_class_init(ObjectClass *klass, void *data)
696 DeviceClass *dc = DEVICE_CLASS(klass);
697 S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);
699 device_class_set_props(dc, vfio_ccw_properties);
700 dc->vmsd = &vfio_ccw_vmstate;
701 dc->desc = "VFIO-based subchannel assignment";
702 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
703 dc->realize = vfio_ccw_realize;
704 dc->unrealize = vfio_ccw_unrealize;
705 dc->reset = vfio_ccw_reset;
707 cdc->handle_request = vfio_ccw_handle_request;
708 cdc->handle_halt = vfio_ccw_handle_halt;
709 cdc->handle_clear = vfio_ccw_handle_clear;
710 cdc->handle_store = vfio_ccw_handle_store;
/* QOM type description for the vfio-ccw device. */
static const TypeInfo vfio_ccw_info = {
    .name = TYPE_VFIO_CCW,
    .parent = TYPE_S390_CCW,
    .instance_size = sizeof(VFIOCCWDevice),
    .class_init = vfio_ccw_class_init,
};
/* Register the vfio-ccw type with QOM at module-init time. */
static void register_vfio_ccw_type(void)
{
    type_register_static(&vfio_ccw_info);
}

type_init(register_vfio_ccw_type)