target/mips: Make check_cp0_enabled() return a boolean
[qemu/ar7.git] / hw / s390x / virtio-ccw.c
blob8195f3546e438f556cdd1c7de78fb93a635ecc25
/*
 * virtio ccw target implementation
 *
 * Copyright 2012,2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "sysemu/kvm.h"
16 #include "net/net.h"
17 #include "hw/virtio/virtio.h"
18 #include "migration/qemu-file-types.h"
19 #include "hw/virtio/virtio-net.h"
20 #include "hw/sysbus.h"
21 #include "qemu/bitops.h"
22 #include "qemu/error-report.h"
23 #include "qemu/module.h"
24 #include "hw/virtio/virtio-access.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/s390x/adapter.h"
27 #include "hw/s390x/s390_flic.h"
29 #include "hw/s390x/ioinst.h"
30 #include "hw/s390x/css.h"
31 #include "virtio-ccw.h"
32 #include "trace.h"
33 #include "hw/s390x/css-bridge.h"
34 #include "hw/s390x/s390-virtio-ccw.h"
/* Classic (non-thinint) interrupts use a 64-bit per-device indicator area. */
36 #define NR_CLASSIC_INDICATOR_BITS 64
/* NOTE(review): presumably consulted elsewhere to tell that virtio-ccw is
 * available on this machine -- confirm against the users of this symbol. */
38 bool have_virtio_ccw = true;
/*
 * vmstate post_load hook: re-attach the subchannel to this device and
 * recompute state that is derived rather than migrated.
 */
40 static int virtio_ccw_dev_post_load(void *opaque, int version_id)
42 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
43 CcwDevice *ccw_dev = CCW_DEVICE(dev);
44 CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
46 ccw_dev->sch->driver_data = dev;
/* The adapter id is recomputed from the migrated thinint_isc. */
47 if (ccw_dev->sch->thinint_active) {
48 dev->routes.adapter.adapter_id = css_get_adapter_id(
49 CSS_IO_ADAPTER_VIRTIO,
50 dev->thinint_isc);
52 /* Re-fill subch_id after loading the subchannel states.*/
53 if (ck->refill_ids) {
54 ck->refill_ids(ccw_dev);
56 return 0;
/*
 * Temporary structure used via VMSTATE_WITH_TMP to migrate the virtio
 * config_vector alongside the ccw proxy device state.
 */
59 typedef struct VirtioCcwDeviceTmp {
60 VirtioCcwDevice *parent;
61 uint16_t config_vector;
62 } VirtioCcwDeviceTmp;
64 static int virtio_ccw_dev_tmp_pre_save(void *opaque)
66 VirtioCcwDeviceTmp *tmp = opaque;
67 VirtioCcwDevice *dev = tmp->parent;
68 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
70 tmp->config_vector = vdev->config_vector;
72 return 0;
75 static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
77 VirtioCcwDeviceTmp *tmp = opaque;
78 VirtioCcwDevice *dev = tmp->parent;
79 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
81 vdev->config_vector = tmp->config_vector;
82 return 0;
/* Migrates the virtio config_vector through the VirtioCcwDeviceTmp shim. */
85 const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
86 .name = "s390_virtio_ccw_dev_tmp",
87 .pre_save = virtio_ccw_dev_tmp_pre_save,
88 .post_load = virtio_ccw_dev_tmp_post_load,
89 .fields = (VMStateField[]) {
90 VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
91 VMSTATE_END_OF_LIST()
/*
 * Top-level migration description for a virtio-ccw proxy device:
 * parent ccw device state, indicator addresses, the config_vector shim,
 * adapter routes and the negotiated revision.
 */
95 const VMStateDescription vmstate_virtio_ccw_dev = {
96 .name = "s390_virtio_ccw_dev",
97 .version_id = 1,
98 .minimum_version_id = 1,
99 .post_load = virtio_ccw_dev_post_load,
100 .fields = (VMStateField[]) {
101 VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
102 VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
103 VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
104 VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
106 * Ugly hack because VirtIODevice does not migrate itself.
107 * This also makes legacy via vmstate_save_state possible.
109 VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
110 vmstate_virtio_ccw_dev_tmp),
111 VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
112 AdapterRoutes),
113 VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
114 VMSTATE_INT32(revision, VirtioCcwDevice),
115 VMSTATE_END_OF_LIST()
/* Forward declaration; the definition lives with the bus glue below. */
119 static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
120 VirtioCcwDevice *dev);
122 VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
124 VirtIODevice *vdev = NULL;
125 VirtioCcwDevice *dev = sch->driver_data;
127 if (dev) {
128 vdev = virtio_bus_get_device(&dev->bus);
130 return vdev;
133 static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
135 virtio_bus_start_ioeventfd(&dev->bus);
138 static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
140 virtio_bus_stop_ioeventfd(&dev->bus);
143 static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
145 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
147 return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
150 static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
151 int n, bool assign)
153 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
154 CcwDevice *ccw_dev = CCW_DEVICE(dev);
155 SubchDev *sch = ccw_dev->sch;
156 uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;
158 return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
161 /* Communication blocks used by several channel commands. */
/* Legacy (revision 0) virtqueue description: one ring base plus alignment. */
162 typedef struct VqInfoBlockLegacy {
163 uint64_t queue;
164 uint32_t align;
165 uint16_t index;
166 uint16_t num;
167 } QEMU_PACKED VqInfoBlockLegacy;
/* Virtio-1 virtqueue description: separate desc/avail/used ring addresses. */
169 typedef struct VqInfoBlock {
170 uint64_t desc;
171 uint32_t res0;
172 uint16_t index;
173 uint16_t num;
174 uint64_t avail;
175 uint64_t used;
176 } QEMU_PACKED VqInfoBlock;
/* Reply payload for CCW_CMD_READ_VQ_CONF: max ring size for one queue. */
178 typedef struct VqConfigBlock {
179 uint16_t index;
180 uint16_t num_max;
181 } QEMU_PACKED VqConfigBlock;
/* One 32-bit feature word plus its index (READ_FEAT/WRITE_FEAT payload). */
183 typedef struct VirtioFeatDesc {
184 uint32_t features;
185 uint8_t index;
186 } QEMU_PACKED VirtioFeatDesc;
/* Payload for CCW_CMD_SET_IND_ADAPTER (adapter/thin interrupts). */
188 typedef struct VirtioThinintInfo {
189 hwaddr summary_indicator;
190 hwaddr device_indicator;
191 uint64_t ind_bit;
192 uint8_t isc;
193 } QEMU_PACKED VirtioThinintInfo;
/* Payload for CCW_CMD_SET_VIRTIO_REV: requested revision plus extra data. */
195 typedef struct VirtioRevInfo {
196 uint16_t revision;
197 uint16_t length;
198 uint8_t data[];
199 } QEMU_PACKED VirtioRevInfo;
201 /* Specify where the virtqueues for the subchannel are in guest memory. */
/*
 * Exactly one of @info (virtio-1) or @linfo (legacy) is non-NULL (see the
 * callers in virtio_ccw_handle_set_vq).  Returns 0 on success or -EINVAL
 * on an invalid index, alignment, ring size, or missing device.
 */
202 static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
203 VqInfoBlockLegacy *linfo)
205 VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
206 uint16_t index = info ? info->index : linfo->index;
207 uint16_t num = info ? info->num : linfo->num;
208 uint64_t desc = info ? info->desc : linfo->queue;
210 if (index >= VIRTIO_QUEUE_MAX) {
211 return -EINVAL;
214 /* Current code in virtio.c relies on 4K alignment. */
215 if (linfo && desc && (linfo->align != 4096)) {
216 return -EINVAL;
219 if (!vdev) {
220 return -EINVAL;
223 if (info) {
224 virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
225 } else {
226 virtio_queue_set_addr(vdev, index, desc);
/* A zero descriptor address disables the queue's vector. */
228 if (!desc) {
229 virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
230 } else {
231 if (info) {
232 /* virtio-1 allows changing the ring size. */
233 if (virtio_queue_get_max_num(vdev, index) < num) {
234 /* Fail if we exceed the maximum number. */
235 return -EINVAL;
237 virtio_queue_set_num(vdev, index, num);
238 } else if (virtio_queue_get_num(vdev, index) > num) {
239 /* Fail if we don't have a big enough queue. */
240 return -EINVAL;
242 /* We ignore possible increased num for legacy for compatibility. */
243 virtio_queue_set_vector(vdev, index, index);
245 /* tell notify handler in case of config change */
246 vdev->config_vector = VIRTIO_QUEUE_MAX;
247 return 0;
/*
 * Reset the virtio side of the device: stop ioeventfd, reset @vdev and
 * release all registered indicators, leaving thinint disabled.
 */
250 static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
252 CcwDevice *ccw_dev = CCW_DEVICE(dev);
254 virtio_ccw_stop_ioeventfd(dev);
255 virtio_reset(vdev);
256 if (dev->indicators) {
257 release_indicator(&dev->routes.adapter, dev->indicators);
258 dev->indicators = NULL;
260 if (dev->indicators2) {
261 release_indicator(&dev->routes.adapter, dev->indicators2);
262 dev->indicators2 = NULL;
264 if (dev->summary_indicator) {
265 release_indicator(&dev->routes.adapter, dev->summary_indicator);
266 dev->summary_indicator = NULL;
268 ccw_dev->sch->thinint_active = false;
/*
 * Handle CCW_CMD_SET_VQ: read the (legacy or virtio-1) queue info block
 * from the channel data stream, convert it from big endian, and apply it
 * via virtio_ccw_set_vqs().  The residual count is cleared on completion.
 */
271 static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
272 bool is_legacy)
274 int ret;
275 VqInfoBlock info;
276 VqInfoBlockLegacy linfo;
277 size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);
279 if (check_len) {
280 if (ccw.count != info_len) {
281 return -EINVAL;
283 } else if (ccw.count < info_len) {
284 /* Can't execute command. */
285 return -EINVAL;
287 if (!ccw.cda) {
288 return -EFAULT;
290 if (is_legacy) {
291 ret = ccw_dstream_read(&sch->cds, linfo);
292 if (ret) {
293 return ret;
295 linfo.queue = be64_to_cpu(linfo.queue);
296 linfo.align = be32_to_cpu(linfo.align);
297 linfo.index = be16_to_cpu(linfo.index);
298 linfo.num = be16_to_cpu(linfo.num);
299 ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
300 } else {
301 ret = ccw_dstream_read(&sch->cds, info);
302 if (ret) {
303 return ret;
305 info.desc = be64_to_cpu(info.desc);
306 info.index = be16_to_cpu(info.index);
307 info.num = be16_to_cpu(info.num);
308 info.avail = be64_to_cpu(info.avail);
309 info.used = be64_to_cpu(info.used);
310 ret = virtio_ccw_set_vqs(sch, &info, NULL);
312 sch->curr_status.scsw.count = 0;
313 return ret;
/*
 * Main CCW interpretation callback for virtio-ccw devices: decode and
 * execute one channel command.  Returns 0 on success or a negative errno
 * (per the in-line comments below, -ENOSYS triggers a command reject).
 */
316 static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
318 int ret;
319 VirtioRevInfo revinfo;
320 uint8_t status;
321 VirtioFeatDesc features;
322 hwaddr indicators;
323 VqConfigBlock vq_config;
324 VirtioCcwDevice *dev = sch->driver_data;
325 VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
326 bool check_len;
327 int len;
328 VirtioThinintInfo thinint;
330 if (!dev) {
331 return -EINVAL;
334 trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
335 ccw.cmd_code);
/* Length checks are relaxed when SLI is set and the CCW is not chained. */
336 check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
/* Before revision negotiation, any other command fixes legacy mode. */
338 if (dev->revision < 0 && ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
339 if (dev->force_revision_1) {
341 * virtio-1 drivers must start with negotiating to a revision >= 1,
342 * so post a command reject for all other commands
344 return -ENOSYS;
345 } else {
347 * If the driver issues any command that is not SET_VIRTIO_REV,
348 * we'll have to operate the device in legacy mode.
350 dev->revision = 0;
354 /* Look at the command. */
355 switch (ccw.cmd_code) {
356 case CCW_CMD_SET_VQ:
357 ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
358 break;
359 case CCW_CMD_VDEV_RESET:
360 virtio_ccw_reset_virtio(dev, vdev);
361 ret = 0;
362 break;
363 case CCW_CMD_READ_FEAT:
364 if (check_len) {
365 if (ccw.count != sizeof(features)) {
366 ret = -EINVAL;
367 break;
369 } else if (ccw.count < sizeof(features)) {
370 /* Can't execute command. */
371 ret = -EINVAL;
372 break;
374 if (!ccw.cda) {
375 ret = -EFAULT;
376 } else {
377 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
379 ccw_dstream_advance(&sch->cds, sizeof(features.features));
380 ret = ccw_dstream_read(&sch->cds, features.index);
381 if (ret) {
382 break;
384 if (features.index == 0) {
385 if (dev->revision >= 1) {
386 /* Don't offer legacy features for modern devices. */
387 features.features = (uint32_t)
388 (vdev->host_features & ~vdc->legacy_features);
389 } else {
390 features.features = (uint32_t)vdev->host_features;
392 } else if ((features.index == 1) && (dev->revision >= 1)) {
394 * Only offer feature bits beyond 31 if the guest has
395 * negotiated at least revision 1.
397 features.features = (uint32_t)(vdev->host_features >> 32);
398 } else {
399 /* Return zeroes if the guest supports more feature bits. */
400 features.features = 0;
402 ccw_dstream_rewind(&sch->cds);
403 features.features = cpu_to_le32(features.features);
404 ret = ccw_dstream_write(&sch->cds, features.features);
405 if (!ret) {
406 sch->curr_status.scsw.count = ccw.count - sizeof(features);
409 break;
410 case CCW_CMD_WRITE_FEAT:
411 if (check_len) {
412 if (ccw.count != sizeof(features)) {
413 ret = -EINVAL;
414 break;
416 } else if (ccw.count < sizeof(features)) {
417 /* Can't execute command. */
418 ret = -EINVAL;
419 break;
421 if (!ccw.cda) {
422 ret = -EFAULT;
423 } else {
424 ret = ccw_dstream_read(&sch->cds, features);
425 if (ret) {
426 break;
428 features.features = le32_to_cpu(features.features);
429 if (features.index == 0) {
430 virtio_set_features(vdev,
431 (vdev->guest_features & 0xffffffff00000000ULL) |
432 features.features);
433 } else if ((features.index == 1) && (dev->revision >= 1)) {
435 * If the guest did not negotiate at least revision 1,
436 * we did not offer it any feature bits beyond 31. Such a
437 * guest passing us any bit here is therefore buggy.
439 virtio_set_features(vdev,
440 (vdev->guest_features & 0x00000000ffffffffULL) |
441 ((uint64_t)features.features << 32));
442 } else {
444 * If the guest supports more feature bits, assert that it
445 * passes us zeroes for those we don't support.
447 if (features.features) {
448 qemu_log_mask(LOG_GUEST_ERROR,
449 "Guest bug: features[%i]=%x (expected 0)",
450 features.index, features.features);
451 /* XXX: do a unit check here? */
454 sch->curr_status.scsw.count = ccw.count - sizeof(features);
455 ret = 0;
457 break;
458 case CCW_CMD_READ_CONF:
459 if (check_len) {
460 if (ccw.count > vdev->config_len) {
461 ret = -EINVAL;
462 break;
465 len = MIN(ccw.count, vdev->config_len);
466 if (!ccw.cda) {
467 ret = -EFAULT;
468 } else {
469 virtio_bus_get_vdev_config(&dev->bus, vdev->config);
470 ret = ccw_dstream_write_buf(&sch->cds, vdev->config, len);
/*
 * NOTE(review): unlike WRITE_CONF below, the residual count here is
 * updated only when the write FAILED (ret != 0) -- confirm this is the
 * intended channel semantics rather than an inverted condition.
 */
471 if (ret) {
472 sch->curr_status.scsw.count = ccw.count - len;
475 break;
476 case CCW_CMD_WRITE_CONF:
477 if (check_len) {
478 if (ccw.count > vdev->config_len) {
479 ret = -EINVAL;
480 break;
483 len = MIN(ccw.count, vdev->config_len);
484 if (!ccw.cda) {
485 ret = -EFAULT;
486 } else {
487 ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
488 if (!ret) {
489 virtio_bus_set_vdev_config(&dev->bus, vdev->config);
490 sch->curr_status.scsw.count = ccw.count - len;
493 break;
494 case CCW_CMD_READ_STATUS:
495 if (check_len) {
496 if (ccw.count != sizeof(status)) {
497 ret = -EINVAL;
498 break;
500 } else if (ccw.count < sizeof(status)) {
501 /* Can't execute command. */
502 ret = -EINVAL;
503 break;
505 if (!ccw.cda) {
506 ret = -EFAULT;
507 } else {
508 address_space_stb(&address_space_memory, ccw.cda, vdev->status,
509 MEMTXATTRS_UNSPECIFIED, NULL);
510 sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
511 ret = 0;
513 break;
514 case CCW_CMD_WRITE_STATUS:
515 if (check_len) {
516 if (ccw.count != sizeof(status)) {
517 ret = -EINVAL;
518 break;
520 } else if (ccw.count < sizeof(status)) {
521 /* Can't execute command. */
522 ret = -EINVAL;
523 break;
525 if (!ccw.cda) {
526 ret = -EFAULT;
527 } else {
528 ret = ccw_dstream_read(&sch->cds, status);
529 if (ret) {
530 break;
/* Stop ioeventfd before the device leaves DRIVER_OK. */
532 if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
533 virtio_ccw_stop_ioeventfd(dev);
535 if (virtio_set_status(vdev, status) == 0) {
536 if (vdev->status == 0) {
537 virtio_ccw_reset_virtio(dev, vdev);
539 if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
540 virtio_ccw_start_ioeventfd(dev);
542 sch->curr_status.scsw.count = ccw.count - sizeof(status);
543 ret = 0;
544 } else {
545 /* Trigger a command reject. */
546 ret = -ENOSYS;
549 break;
550 case CCW_CMD_SET_IND:
551 if (check_len) {
552 if (ccw.count != sizeof(indicators)) {
553 ret = -EINVAL;
554 break;
556 } else if (ccw.count < sizeof(indicators)) {
557 /* Can't execute command. */
558 ret = -EINVAL;
559 break;
/* Classic indicators cannot be combined with thin interrupts. */
561 if (sch->thinint_active) {
562 /* Trigger a command reject. */
563 ret = -ENOSYS;
564 break;
566 if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
567 /* More queues than indicator bits --> trigger a reject */
568 ret = -ENOSYS;
569 break;
571 if (!ccw.cda) {
572 ret = -EFAULT;
573 } else {
574 ret = ccw_dstream_read(&sch->cds, indicators);
575 if (ret) {
576 break;
578 indicators = be64_to_cpu(indicators);
579 dev->indicators = get_indicator(indicators, sizeof(uint64_t));
580 sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
581 ret = 0;
583 break;
584 case CCW_CMD_SET_CONF_IND:
585 if (check_len) {
586 if (ccw.count != sizeof(indicators)) {
587 ret = -EINVAL;
588 break;
590 } else if (ccw.count < sizeof(indicators)) {
591 /* Can't execute command. */
592 ret = -EINVAL;
593 break;
595 if (!ccw.cda) {
596 ret = -EFAULT;
597 } else {
598 ret = ccw_dstream_read(&sch->cds, indicators);
599 if (ret) {
600 break;
602 indicators = be64_to_cpu(indicators);
603 dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
604 sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
605 ret = 0;
607 break;
608 case CCW_CMD_READ_VQ_CONF:
609 if (check_len) {
610 if (ccw.count != sizeof(vq_config)) {
611 ret = -EINVAL;
612 break;
614 } else if (ccw.count < sizeof(vq_config)) {
615 /* Can't execute command. */
616 ret = -EINVAL;
617 break;
619 if (!ccw.cda) {
620 ret = -EFAULT;
621 } else {
622 ret = ccw_dstream_read(&sch->cds, vq_config.index);
623 if (ret) {
624 break;
626 vq_config.index = be16_to_cpu(vq_config.index);
627 if (vq_config.index >= VIRTIO_QUEUE_MAX) {
628 ret = -EINVAL;
629 break;
631 vq_config.num_max = virtio_queue_get_num(vdev,
632 vq_config.index);
633 vq_config.num_max = cpu_to_be16(vq_config.num_max);
634 ret = ccw_dstream_write(&sch->cds, vq_config.num_max);
635 if (!ret) {
636 sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
639 break;
640 case CCW_CMD_SET_IND_ADAPTER:
641 if (check_len) {
642 if (ccw.count != sizeof(thinint)) {
643 ret = -EINVAL;
644 break;
646 } else if (ccw.count < sizeof(thinint)) {
647 /* Can't execute command. */
648 ret = -EINVAL;
649 break;
651 if (!ccw.cda) {
652 ret = -EFAULT;
653 } else if (dev->indicators && !sch->thinint_active) {
654 /* Trigger a command reject. */
655 ret = -ENOSYS;
656 } else {
657 if (ccw_dstream_read(&sch->cds, thinint)) {
658 ret = -EFAULT;
659 } else {
660 thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
661 thinint.summary_indicator =
662 be64_to_cpu(thinint.summary_indicator);
663 thinint.device_indicator =
664 be64_to_cpu(thinint.device_indicator);
666 dev->summary_indicator =
667 get_indicator(thinint.summary_indicator, sizeof(uint8_t));
668 dev->indicators =
669 get_indicator(thinint.device_indicator,
670 thinint.ind_bit / 8 + 1);
671 dev->thinint_isc = thinint.isc;
672 dev->routes.adapter.ind_offset = thinint.ind_bit;
673 dev->routes.adapter.summary_offset = 7;
674 dev->routes.adapter.adapter_id = css_get_adapter_id(
675 CSS_IO_ADAPTER_VIRTIO,
676 dev->thinint_isc);
677 sch->thinint_active = ((dev->indicators != NULL) &&
678 (dev->summary_indicator != NULL));
679 sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
680 ret = 0;
683 break;
684 case CCW_CMD_SET_VIRTIO_REV:
685 len = sizeof(revinfo);
686 if (ccw.count < len) {
687 ret = -EINVAL;
688 break;
690 if (!ccw.cda) {
691 ret = -EFAULT;
692 break;
/* Only the fixed 4-byte header (revision + length) is read here. */
694 ret = ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
695 if (ret < 0) {
696 break;
698 revinfo.revision = be16_to_cpu(revinfo.revision);
699 revinfo.length = be16_to_cpu(revinfo.length);
700 if (ccw.count < len + revinfo.length ||
701 (check_len && ccw.count > len + revinfo.length)) {
702 ret = -EINVAL;
703 break;
706 * Once we start to support revisions with additional data, we'll
707 * need to fetch it here. Nothing to do for now, though.
709 if (dev->revision >= 0 ||
710 revinfo.revision > virtio_ccw_rev_max(dev) ||
711 (dev->force_revision_1 && !revinfo.revision)) {
712 ret = -ENOSYS;
713 break;
715 ret = 0;
716 dev->revision = revinfo.revision;
717 break;
718 default:
719 ret = -ENOSYS;
720 break;
722 return ret;
725 static void virtio_sch_disable_cb(SubchDev *sch)
727 VirtioCcwDevice *dev = sch->driver_data;
729 dev->revision = -1;
/*
 * Realize the virtio-ccw proxy: create and wire up the virtual subchannel,
 * then run the subclass and parent realize hooks.  On any failure after
 * subchannel creation the subchannel is unassigned and freed again.
 */
732 static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
734 VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
735 CcwDevice *ccw_dev = CCW_DEVICE(dev);
736 CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
737 SubchDev *sch;
738 Error *err = NULL;
739 int i;
741 sch = css_create_sch(ccw_dev->devno, errp);
742 if (!sch) {
743 return;
745 if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
746 error_setg(&err, "Invalid value of property max_rev "
747 "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
748 goto out_err;
751 sch->driver_data = dev;
752 sch->ccw_cb = virtio_ccw_cb;
753 sch->disable_cb = virtio_sch_disable_cb;
754 sch->id.reserved = 0xff;
755 sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
756 sch->do_subchannel_work = do_subchannel_work_virtual;
757 ccw_dev->sch = sch;
758 dev->indicators = NULL;
/* revision < 0 means "not negotiated yet"; see virtio_ccw_cb(). */
759 dev->revision = -1;
760 for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) {
761 dev->routes.gsi[i] = -1;
763 css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);
765 trace_virtio_ccw_new_device(
766 sch->cssid, sch->ssid, sch->schid, sch->devno,
767 ccw_dev->devno.valid ? "user-configured" : "auto-configured");
/* Without KVM eventfd support, ioeventfd cannot be used. */
769 if (kvm_enabled() && !kvm_eventfds_enabled()) {
770 dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
773 if (k->realize) {
774 k->realize(dev, &err);
775 if (err) {
776 goto out_err;
780 ck->realize(ccw_dev, &err);
781 if (err) {
782 goto out_err;
785 return;
787 out_err:
788 error_propagate(errp, err);
789 css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
790 ccw_dev->sch = NULL;
791 g_free(sch);
/*
 * Unrealize counterpart: run the subclass unrealize hook, then unassign
 * and free the subchannel and release any remaining indicator.
 */
794 static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev)
796 VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
797 CcwDevice *ccw_dev = CCW_DEVICE(dev);
798 SubchDev *sch = ccw_dev->sch;
800 if (dc->unrealize) {
801 dc->unrealize(dev);
804 if (sch) {
805 css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
806 g_free(sch);
807 ccw_dev->sch = NULL;
809 if (dev->indicators) {
810 release_indicator(&dev->routes.adapter, dev->indicators);
811 dev->indicators = NULL;
815 /* DeviceState to VirtioCcwDevice. Note: used on datapath,
816 * be careful and test performance if you change this.
818 static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
820 CcwDevice *ccw_dev = to_ccw_dev_fast(d);
822 return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
/*
 * Atomically OR @to_be_set into the guest indicator byte at @ind_loc and
 * return the byte's previous value.  Returns (uint8_t)-1 (0xff) when the
 * guest memory cannot be mapped -- NOTE(review): 0xff is also a possible
 * legitimate previous value, so callers cannot distinguish that failure.
 */
825 static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
826 uint8_t to_be_set)
828 uint8_t expected, actual;
829 hwaddr len = 1;
830 /* avoid multiple fetches */
831 uint8_t volatile *ind_addr;
833 ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
834 if (!ind_addr) {
835 error_report("%s(%x.%x.%04x): unable to access indicator",
836 __func__, sch->cssid, sch->ssid, sch->schid);
837 return -1;
/* Retry the compare-and-swap until no other writer intervened. */
839 actual = *ind_addr;
840 do {
841 expected = actual;
842 actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
843 } while (actual != expected);
844 trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set);
845 cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);
847 return actual;
/*
 * Guest notification path: set the appropriate indicator bit(s) and inject
 * an interrupt.  Queue vectors use either adapter (thinint) indicators or
 * the classic 64-bit indicator area; VIRTIO_QUEUE_MAX signals a config
 * change via the secondary indicators.
 */
850 static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
852 VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
853 CcwDevice *ccw_dev = to_ccw_dev_fast(d);
854 SubchDev *sch = ccw_dev->sch;
855 uint64_t indicators;
857 if (vector == VIRTIO_NO_VECTOR) {
858 return;
861 * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
862 * vector == VIRTIO_QUEUE_MAX: configuration change notification
863 * bits beyond that are unused and should never be notified for
865 assert(vector <= VIRTIO_QUEUE_MAX);
867 if (vector < VIRTIO_QUEUE_MAX) {
868 if (!dev->indicators) {
869 return;
871 if (sch->thinint_active) {
873 * In the adapter interrupt case, indicators points to a
874 * memory area that may be (way) larger than 64 bit and
875 * ind_bit indicates the start of the indicators in a big
876 * endian notation.
878 uint64_t ind_bit = dev->routes.adapter.ind_offset;
880 virtio_set_ind_atomic(sch, dev->indicators->addr +
881 (ind_bit + vector) / 8,
882 0x80 >> ((ind_bit + vector) % 8));
/* Only raise the adapter interrupt if the summary bit was clear. */
883 if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
884 0x01)) {
885 css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
887 } else {
888 assert(vector < NR_CLASSIC_INDICATOR_BITS);
889 indicators = address_space_ldq(&address_space_memory,
890 dev->indicators->addr,
891 MEMTXATTRS_UNSPECIFIED,
892 NULL);
893 indicators |= 1ULL << vector;
894 address_space_stq(&address_space_memory, dev->indicators->addr,
895 indicators, MEMTXATTRS_UNSPECIFIED, NULL);
896 css_conditional_io_interrupt(sch);
898 } else {
899 if (!dev->indicators2) {
900 return;
/* Configuration change: bit 0 of the secondary indicators. */
902 indicators = address_space_ldq(&address_space_memory,
903 dev->indicators2->addr,
904 MEMTXATTRS_UNSPECIFIED,
905 NULL);
906 indicators |= 1ULL;
907 address_space_stq(&address_space_memory, dev->indicators2->addr,
908 indicators, MEMTXATTRS_UNSPECIFIED, NULL);
909 css_conditional_io_interrupt(sch);
913 static void virtio_ccw_reset(DeviceState *d)
915 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
916 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
917 VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
919 virtio_ccw_reset_virtio(dev, vdev);
920 if (vdc->parent_reset) {
921 vdc->parent_reset(d);
925 static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
927 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
929 if (running) {
930 virtio_ccw_start_ioeventfd(dev);
931 } else {
932 virtio_ccw_stop_ioeventfd(dev);
936 static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
938 CcwDevice *dev = CCW_DEVICE(d);
940 return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
/*
 * Map the summary and device indicators for adapter routing.  Only valid
 * while thin interrupts are active; fills in the adapter route addresses.
 */
943 static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
945 int r;
946 CcwDevice *ccw_dev = CCW_DEVICE(dev);
948 if (!ccw_dev->sch->thinint_active) {
949 return -EINVAL;
952 r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
953 if (r) {
954 return r;
956 r = map_indicator(&dev->routes.adapter, dev->indicators);
957 if (r) {
958 return r;
960 dev->routes.adapter.summary_addr = dev->summary_indicator->map;
961 dev->routes.adapter.ind_addr = dev->indicators->map;
963 return 0;
/*
 * Set up adapter interrupt routes for up to @nvqs queues.  The route count
 * is the number of leading queues that actually exist (the loop stops at
 * the first queue with size 0).
 */
966 static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
968 int i;
969 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
970 int ret;
971 S390FLICState *fs = s390_get_flic();
972 S390FLICStateClass *fsc = s390_get_flic_class(fs);
974 ret = virtio_ccw_get_mappings(dev);
975 if (ret) {
976 return ret;
978 for (i = 0; i < nvqs; i++) {
979 if (!virtio_queue_get_num(vdev, i)) {
980 break;
983 dev->routes.num_routes = i;
984 return fsc->add_adapter_routes(fs, &dev->routes);
987 static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
989 S390FLICState *fs = s390_get_flic();
990 S390FLICStateClass *fsc = s390_get_flic_class(fs);
992 fsc->release_adapter_routes(fs, &dev->routes);
995 static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
997 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
998 VirtQueue *vq = virtio_get_queue(vdev, n);
999 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
1001 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
1002 dev->routes.gsi[n]);
1005 static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
1007 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1008 VirtQueue *vq = virtio_get_queue(vdev, n);
1009 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
1010 int ret;
1012 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
1013 dev->routes.gsi[n]);
1014 assert(ret == 0);
/*
 * (Un)assign the guest notifier for queue @n, optionally backed by an
 * irqfd, and mirror the masking state to the device class hooks.
 * Returns 0 on success or a negative error from notifier/irqfd setup.
 */
1017 static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
1018 bool assign, bool with_irqfd)
1020 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1021 VirtQueue *vq = virtio_get_queue(vdev, n);
1022 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
1023 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1025 if (assign) {
1026 int r = event_notifier_init(notifier, 0);
1028 if (r < 0) {
1029 return r;
1031 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
1032 if (with_irqfd) {
1033 r = virtio_ccw_add_irqfd(dev, n);
1034 if (r) {
/* Roll back the fd handler if irqfd setup failed. */
1035 virtio_queue_set_guest_notifier_fd_handler(vq, false,
1036 with_irqfd);
1037 return r;
1041 * We do not support individual masking for channel devices, so we
1042 * need to manually trigger any guest masking callbacks here.
1044 if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
1045 k->guest_notifier_mask(vdev, n, false);
1047 /* get lost events and re-inject */
1048 if (k->guest_notifier_pending &&
1049 k->guest_notifier_pending(vdev, n)) {
1050 event_notifier_set(notifier);
1052 } else {
1053 if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
1054 k->guest_notifier_mask(vdev, n, true);
1056 if (with_irqfd) {
1057 virtio_ccw_remove_irqfd(dev, n);
1059 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
1060 event_notifier_cleanup(notifier);
1062 return 0;
/*
 * Bus-level set_guest_notifiers: (un)assign notifiers for all existing
 * queues, setting up adapter irq routes first when irqfds are in use and
 * unwinding already-assigned notifiers on failure.
 */
1065 static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
1066 bool assigned)
1068 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1069 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1070 CcwDevice *ccw_dev = CCW_DEVICE(d);
1071 bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
1072 int r, n;
1074 if (with_irqfd && assigned) {
1075 /* irq routes need to be set up before assigning irqfds */
1076 r = virtio_ccw_setup_irqroutes(dev, nvqs);
1077 if (r < 0) {
1078 goto irqroute_error;
1081 for (n = 0; n < nvqs; n++) {
/* Stop at the first queue that does not exist. */
1082 if (!virtio_queue_get_num(vdev, n)) {
1083 break;
1085 r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
1086 if (r < 0) {
1087 goto assign_error;
1090 if (with_irqfd && !assigned) {
1091 /* release irq routes after irqfds have been released */
1092 virtio_ccw_release_irqroutes(dev, nvqs);
1094 return 0;
1096 assign_error:
/* Undo the notifiers that were already switched. */
1097 while (--n >= 0) {
1098 virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
1100 irqroute_error:
1101 if (with_irqfd && assigned) {
1102 virtio_ccw_release_irqroutes(dev, nvqs);
1104 return r;
1107 static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
1109 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1110 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1112 qemu_put_be16(f, virtio_queue_vector(vdev, n));
1115 static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
1117 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1118 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1119 uint16_t vector;
1121 qemu_get_be16s(f, &vector);
1122 virtio_queue_set_vector(vdev, n , vector);
1124 return 0;
1127 static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
1129 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1130 vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
1133 static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
1135 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1136 return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
1139 static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
1141 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1142 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1144 if (dev->max_rev >= 1) {
1145 virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
1149 /* This is called by virtio-bus just after the device is plugged. */
1150 static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
1152 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1153 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1154 CcwDevice *ccw_dev = CCW_DEVICE(d);
1155 SubchDev *sch = ccw_dev->sch;
1156 int n = virtio_get_num_queues(vdev);
1157 S390FLICState *flic = s390_get_flic();
1159 if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
1160 dev->max_rev = 0;
1163 if (!virtio_ccw_rev_max(dev) && !virtio_legacy_allowed(vdev)) {
1165 * To avoid migration issues, we allow legacy mode when legacy
1166 * check is disabled in the old machine types (< 5.1).
1168 if (virtio_legacy_check_disabled(vdev)) {
1169 warn_report("device requires revision >= 1, but for backward "
1170 "compatibility max_revision=0 is allowed");
1171 } else {
1172 error_setg(errp, "Invalid value of property max_rev "
1173 "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
1174 return;
1178 if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
1179 error_setg(errp, "The number of virtqueues %d "
1180 "exceeds virtio limit %d", n,
1181 VIRTIO_QUEUE_MAX);
1182 return;
1184 if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
1185 error_setg(errp, "The number of virtqueues %d "
1186 "exceeds flic adapter route limit %d", n,
1187 flic->adapter_routes_max_batch);
1188 return;
1191 sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);
1194 css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
1195 d->hotplugged, 1);
1198 static void virtio_ccw_device_unplugged(DeviceState *d)
1200 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1202 virtio_ccw_stop_ioeventfd(dev);
1204 /**************** Virtio-ccw Bus Device Descriptions *******************/
1206 static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
1208 VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
1210 virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
1211 virtio_ccw_device_realize(_dev, errp);
1214 static void virtio_ccw_busdev_unrealize(DeviceState *dev)
1216 VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
1218 virtio_ccw_device_unrealize(_dev);
1221 static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
1222 DeviceState *dev, Error **errp)
1224 VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);
1226 virtio_ccw_stop_ioeventfd(_dev);
1229 static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
1231 DeviceClass *dc = DEVICE_CLASS(klass);
1232 CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
1233 VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);
1235 k->unplug = virtio_ccw_busdev_unplug;
1236 dc->realize = virtio_ccw_busdev_realize;
1237 dc->unrealize = virtio_ccw_busdev_unrealize;
1238 dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
1239 device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
/* Abstract base QOM type for all virtio-ccw proxy devices. */
1242 static const TypeInfo virtio_ccw_device_info = {
1243 .name = TYPE_VIRTIO_CCW_DEVICE,
1244 .parent = TYPE_CCW_DEVICE,
1245 .instance_size = sizeof(VirtioCcwDevice),
1246 .class_init = virtio_ccw_device_class_init,
1247 .class_size = sizeof(VirtIOCCWDeviceClass),
1248 .abstract = true,
1251 /* virtio-ccw-bus */
1253 static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
1254 VirtioCcwDevice *dev)
1256 DeviceState *qdev = DEVICE(dev);
1257 char virtio_bus_name[] = "virtio-bus";
1259 qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS,
1260 qdev, virtio_bus_name);
1263 static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
1265 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
1266 BusClass *bus_class = BUS_CLASS(klass);
1268 bus_class->max_dev = 1;
1269 k->notify = virtio_ccw_notify;
1270 k->vmstate_change = virtio_ccw_vmstate_change;
1271 k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
1272 k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
1273 k->save_queue = virtio_ccw_save_queue;
1274 k->load_queue = virtio_ccw_load_queue;
1275 k->save_config = virtio_ccw_save_config;
1276 k->load_config = virtio_ccw_load_config;
1277 k->pre_plugged = virtio_ccw_pre_plugged;
1278 k->device_plugged = virtio_ccw_device_plugged;
1279 k->device_unplugged = virtio_ccw_device_unplugged;
1280 k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
1281 k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
/* QOM type for the virtio bus embedded in each virtio-ccw proxy. */
1284 static const TypeInfo virtio_ccw_bus_info = {
1285 .name = TYPE_VIRTIO_CCW_BUS,
1286 .parent = TYPE_VIRTIO_BUS,
1287 .instance_size = sizeof(VirtioCcwBusState),
1288 .class_size = sizeof(VirtioCcwBusClass),
1289 .class_init = virtio_ccw_bus_class_init,
1292 static void virtio_ccw_register(void)
1294 type_register_static(&virtio_ccw_bus_info);
1295 type_register_static(&virtio_ccw_device_info);
1298 type_init(virtio_ccw_register)