/*
 * virtio ccw target implementation
 *
 * Copyright 2012,2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-net.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"

#include "hw/s390x/ioinst.h"
#include "hw/s390x/css.h"
#include "virtio-ccw.h"
#include "trace.h"
#include "hw/s390x/css-bridge.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "sysemu/replay.h"
#define NR_CLASSIC_INDICATOR_BITS 64

bool have_virtio_ccw = true;
static int virtio_ccw_dev_post_load(void *opaque, int version_id)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);

    ccw_dev->sch->driver_data = dev;
    if (ccw_dev->sch->thinint_active) {
        dev->routes.adapter.adapter_id = css_get_adapter_id(
                                         CSS_IO_ADAPTER_VIRTIO,
                                         dev->thinint_isc);
    }
    /* Re-fill subch_id after loading the subchannel states.*/
    if (ck->refill_ids) {
        ck->refill_ids(ccw_dev);
    }
    return 0;
}
typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;
    uint16_t config_vector;
} VirtioCcwDeviceTmp;
static int virtio_ccw_dev_tmp_pre_save(void *opaque)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    tmp->config_vector = vdev->config_vector;

    return 0;
}
static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    vdev->config_vector = tmp->config_vector;
    return 0;
}
const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};
const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};
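
/*
 * How the VMSTATE_WITH_TMP entry above works, in outline (simplified; the
 * exact vmstate-core sequence is assumed): a temporary VirtioCcwDeviceTmp
 * is allocated around save/load, its ->parent is pointed at the
 * VirtioCcwDevice being migrated, and the pre_save/post_load hooks of
 * vmstate_virtio_ccw_dev_tmp shuttle vdev->config_vector through it:
 *
 *     save: tmp.parent = dev; pre_save(&tmp); stream out tmp.config_vector
 *     load: tmp.parent = dev; stream in tmp.config_vector; post_load(&tmp)
 */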
static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev);
VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
{
    VirtIODevice *vdev = NULL;
    VirtioCcwDevice *dev = sch->driver_data;

    if (dev) {
        vdev = virtio_bus_get_device(&dev->bus);
    }
    return vdev;
}
static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}

static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}
static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}
static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;

    return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}
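
/*
 * Rough layout of the sch_id key built above (assumed illustration, not a
 * formal definition): the value css_build_subchannel_id() returns forms the
 * upper halfword and the subchannel number the lower one, i.e.
 *
 *     bits 31..16  subchannel-id word (cssid/ssid/"one" bit)
 *     bits 15..0   subchannel number (sch->schid)
 *
 * so each ioeventfd is keyed by the same 32-bit identifier the guest uses to
 * address the subchannel.
 */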
/* Communication blocks used by several channel commands. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;
    uint32_t align;
    uint16_t index;
    uint16_t num;
} QEMU_PACKED VqInfoBlockLegacy;

typedef struct VqInfoBlock {
    uint64_t desc;
    uint32_t res0;
    uint16_t index;
    uint16_t num;
    uint64_t avail;
    uint64_t used;
} QEMU_PACKED VqInfoBlock;

typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max;
} QEMU_PACKED VqConfigBlock;

typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;
} QEMU_PACKED VirtioFeatDesc;

typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit;
    uint8_t isc;
} QEMU_PACKED VirtioThinintInfo;

typedef struct VirtioRevInfo {
    uint16_t revision;
    uint16_t length;
    uint8_t data[0];
} QEMU_PACKED VirtioRevInfo;
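
/*
 * All of the control blocks above travel through the CCW data stream and are
 * big-endian on the wire; the handlers below byte-swap every field
 * explicitly. As a worked example (values assumed for illustration), a
 * revision-1 CCW_CMD_SET_VQ payload is one VqInfoBlock:
 *
 *     desc  = 0x80001000   descriptor area (guest absolute address)
 *     res0  = 0
 *     index = 2            virtqueue number
 *     num   = 256          ring size
 *     avail = 0x80002000   driver/avail area
 *     used  = 0x80003000   device/used area
 */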
/* Specify where the virtqueues for the subchannel are in guest memory. */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number. */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
            virtio_init_region_cache(vdev, index);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}
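
/*
 * Summary of the two paths above: a revision >= 1 SET_VQ supplies the three
 * ring areas separately and may shrink the ring, so the requested size is
 * applied with virtio_queue_set_num() and the region cache is rebuilt; the
 * legacy path gets a single contiguous ring with a fixed 4K alignment, and
 * the advertised num is only validated, never changed.
 */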
static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    virtio_bus_reset(&dev->bus);
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
    if (dev->indicators2) {
        release_indicator(&dev->routes.adapter, dev->indicators2);
        dev->indicators2 = NULL;
    }
    if (dev->summary_indicator) {
        release_indicator(&dev->routes.adapter, dev->summary_indicator);
        dev->summary_indicator = NULL;
    }
    ccw_dev->sch->thinint_active = false;
}
static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command. */
        return -EINVAL;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    if (is_legacy) {
        ret = ccw_dstream_read(&sch->cds, linfo);
        if (ret) {
            return ret;
        }
        linfo.queue = be64_to_cpu(linfo.queue);
        linfo.align = be32_to_cpu(linfo.align);
        linfo.index = be16_to_cpu(linfo.index);
        linfo.num = be16_to_cpu(linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ret = ccw_dstream_read(&sch->cds, info);
        if (ret) {
            return ret;
        }
        info.desc = be64_to_cpu(info.desc);
        info.index = be16_to_cpu(info.index);
        info.num = be16_to_cpu(info.num);
        info.avail = be64_to_cpu(info.avail);
        info.used = be64_to_cpu(info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    sch->curr_status.scsw.count = 0;

    return ret;
}
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VirtioRevInfo revinfo;
    uint8_t status;
    VirtioFeatDesc features;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    bool check_len;
    int len;
    VirtioThinintInfo thinint;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (dev->revision < 0 && ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
        if (dev->force_revision_1) {
            /*
             * virtio-1 drivers must start with negotiating to a revision >= 1,
             * so post a command reject for all other commands
             */
            return -ENOSYS;
        }
        /*
         * If the driver issues any command that is not SET_VIRTIO_REV,
         * we'll have to operate the device in legacy mode.
         */
        dev->revision = 0;
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_ccw_reset_virtio(dev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            ccw_dstream_advance(&sch->cds, sizeof(features.features));
            ret = ccw_dstream_read(&sch->cds, features.index);
            if (ret) {
                break;
            }
            if (features.index == 0) {
                if (dev->revision >= 1) {
                    /* Don't offer legacy features for modern devices. */
                    features.features = (uint32_t)
                        (vdev->host_features & ~vdc->legacy_features);
                } else {
                    features.features = (uint32_t)vdev->host_features;
                }
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * Only offer feature bits beyond 31 if the guest has
                 * negotiated at least revision 1.
                 */
                features.features = (uint32_t)(vdev->host_features >> 32);
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            ccw_dstream_rewind(&sch->cds);
            features.features = cpu_to_le32(features.features);
            ret = ccw_dstream_write(&sch->cds, features.features);
            if (!ret) {
                sch->curr_status.scsw.count = ccw.count - sizeof(features);
            }
        }
        break;
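    /*
     * Worked example for the feature windows handled above (bit numbers
     * follow the virtio spec): VIRTIO_F_VERSION_1 is feature bit 32, so it
     * never appears in the features.index == 0 word (bits 0..31) and shows
     * up as bit 0 of the features.index == 1 word, which is only offered
     * once the guest has negotiated revision >= 1.
     */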
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, features);
            if (ret) {
                break;
            }
            features.features = le32_to_cpu(features.features);
            if (features.index == 0) {
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0xffffffff00000000ULL) |
                                    features.features);
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * If the guest did not negotiate at least revision 1,
                 * we did not offer it any feature bits beyond 31. Such a
                 * guest passing us any bit here is therefore buggy.
                 */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0x00000000ffffffffULL) |
                                    ((uint64_t)features.features << 32));
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Guest bug: features[%i]=%x (expected 0)",
                                  features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
            ret = ccw_dstream_write_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                virtio_bus_set_vdev_config(&dev->bus, vdev->config);
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_READ_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            address_space_stb(&address_space_memory, ccw.cda, vdev->status,
                              MEMTXATTRS_UNSPECIFIED, NULL);
            sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, status);
            if (ret) {
                break;
            }
            if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                virtio_ccw_stop_ioeventfd(dev);
            }
            if (virtio_set_status(vdev, status) == 0) {
                if (vdev->status == 0) {
                    virtio_ccw_reset_virtio(dev);
                }
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                    virtio_ccw_start_ioeventfd(dev);
                }
                sch->curr_status.scsw.count = ccw.count - sizeof(status);
                ret = 0;
            } else {
                /* Trigger a command reject. */
                ret = -ENOSYS;
            }
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
            break;
        }
        if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
            /* More queues than indicator bits --> trigger a reject */
            ret = -ENOSYS;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, indicators);
            if (ret) {
                break;
            }
            indicators = be64_to_cpu(indicators);
            dev->indicators = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, indicators);
            if (ret) {
                break;
            }
            indicators = be64_to_cpu(indicators);
            dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, vq_config.index);
            if (ret) {
                break;
            }
            vq_config.index = be16_to_cpu(vq_config.index);
            if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                ret = -EINVAL;
                break;
            }
            vq_config.num_max = virtio_queue_get_num(vdev,
                                                     vq_config.index);
            vq_config.num_max = cpu_to_be16(vq_config.num_max);
            ret = ccw_dstream_write(&sch->cds, vq_config.num_max);
            if (!ret) {
                sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            }
        }
        break;
    case CCW_CMD_SET_IND_ADAPTER:
        if (check_len) {
            if (ccw.count != sizeof(thinint)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(thinint)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else if (dev->indicators && !sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
        } else {
            if (ccw_dstream_read(&sch->cds, thinint)) {
                ret = -EFAULT;
            } else {
                thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
                thinint.summary_indicator =
                    be64_to_cpu(thinint.summary_indicator);
                thinint.device_indicator =
                    be64_to_cpu(thinint.device_indicator);

                dev->summary_indicator =
                    get_indicator(thinint.summary_indicator, sizeof(uint8_t));
                dev->indicators =
                    get_indicator(thinint.device_indicator,
                                  thinint.ind_bit / 8 + 1);
                dev->thinint_isc = thinint.isc;
                dev->routes.adapter.ind_offset = thinint.ind_bit;
                dev->routes.adapter.summary_offset = 7;
                dev->routes.adapter.adapter_id = css_get_adapter_id(
                                                 CSS_IO_ADAPTER_VIRTIO,
                                                 dev->thinint_isc);
                sch->thinint_active = ((dev->indicators != NULL) &&
                                       (dev->summary_indicator != NULL));
                sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
                ret = 0;
            }
        }
        break;
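    /*
     * Sizing note for the adapter indicators set up above: ind_bit is the
     * bit offset of the first queue indicator inside the guest's indicator
     * area, so ind_bit / 8 + 1 bytes are mapped for the base offset.
     * Example (values assumed): with ind_bit = 64 that maps bytes 0..8, and
     * a notification for queue n later sets bit (ind_bit + n), counted
     * big-endian style, in virtio_ccw_notify().
     */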
    case CCW_CMD_SET_VIRTIO_REV:
        len = sizeof(revinfo);
        if (ccw.count < len) {
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
            break;
        }
        ret = ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
        if (ret < 0) {
            break;
        }
        revinfo.revision = be16_to_cpu(revinfo.revision);
        revinfo.length = be16_to_cpu(revinfo.length);
        if (ccw.count < len + revinfo.length ||
            (check_len && ccw.count > len + revinfo.length)) {
            ret = -EINVAL;
            break;
        }
        /*
         * Once we start to support revisions with additional data, we'll
         * need to fetch it here. Nothing to do for now, though.
         */
        if (dev->revision >= 0 ||
            revinfo.revision > virtio_ccw_rev_max(dev) ||
            (dev->force_revision_1 && !revinfo.revision)) {
            ret = -ENOSYS;
            break;
        }
        ret = 0;
        dev->revision = revinfo.revision;
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}
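
/*
 * Revision negotiation in virtio_ccw_cb() in short: dev->revision starts
 * out at -1 (nothing negotiated). A CCW_CMD_SET_VIRTIO_REV may set it once,
 * to at most virtio_ccw_rev_max(dev); any other command seen first silently
 * selects legacy operation (revision 0), unless force_revision_1 is set, in
 * which case that command is answered with a command reject.
 */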
static void virtio_sch_disable_cb(SubchDev *sch)
{
    VirtioCcwDevice *dev = sch->driver_data;

    dev->revision = -1;
}
static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;
    int i;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        goto out_err;
    }

    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    sch->irb_cb = build_irb_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    dev->revision = -1;
    for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) {
        dev->routes.gsi[i] = -1;
    }
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ? "user-configured" : "auto-configured");

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    if (k->realize) {
        k->realize(dev, &err);
        if (err) {
            goto out_err;
        }
    }

    ck->realize(ccw_dev, &err);
    if (err) {
        goto out_err;
    }

    return;

out_err:
    error_propagate(errp, err);
    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
    ccw_dev->sch = NULL;
    g_free(sch);
}
static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev)
{
    VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;

    if (dc->unrealize) {
        dc->unrealize(dev);
    }

    if (sch) {
        css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
        g_free(sch);
        ccw_dev->sch = NULL;
    }
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
}
/* DeviceState to VirtioCcwDevice. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
{
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);

    return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
}
static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
                                     uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        error_report("%s(%x.%x.%04x): unable to access indicator",
                     __func__, sch->cssid, sch->ssid, sch->schid);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}
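
/*
 * virtio_set_ind_atomic() returns the value the indicator byte had before
 * the new bits were merged in (or -1 if the mapping failed). The cmpxchg
 * loop is needed because several threads may OR bits into the same byte
 * concurrently, and a plain read-modify-write could lose an update.
 * Callers rely on the "old value" semantics: virtio_ccw_notify() below only
 * injects the adapter interrupt when the summary indicator was previously
 * clear, which coalesces notifications.
 */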
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
             */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        if (!dev->indicators2) {
            return;
        }
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}
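
/*
 * Classic (non-thinint) example for the path above, values assumed for
 * illustration: a notification for vector 3 loads the 64-bit indicators
 * doubleword, ORs in 1ULL << 3, stores it back and lets
 * css_conditional_io_interrupt() decide whether an I/O interrupt must be
 * made pending for the subchannel; a configuration change uses the separate
 * indicators2 doubleword instead, bit 0.
 */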
static void virtio_ccw_reset(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);

    virtio_ccw_reset_virtio(dev);
    if (vdc->parent_reset) {
        vdc->parent_reset(d);
    }
}
static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    if (running) {
        virtio_ccw_start_ioeventfd(dev);
    } else {
        virtio_ccw_stop_ioeventfd(dev);
    }
}
static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
{
    CcwDevice *dev = CCW_DEVICE(d);

    return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
}
static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
{
    int r;
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    if (!ccw_dev->sch->thinint_active) {
        return -EINVAL;
    }

    r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
    if (r) {
        return r;
    }
    r = map_indicator(&dev->routes.adapter, dev->indicators);
    if (r) {
        return r;
    }
    dev->routes.adapter.summary_addr = dev->summary_indicator->map;
    dev->routes.adapter.ind_addr = dev->indicators->map;

    return 0;
}
static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    int ret;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    int i;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    ret = virtio_ccw_get_mappings(dev);
    if (ret) {
        return ret;
    }
    for (i = 0; i < nvqs; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }
    dev->routes.num_routes = i;
    return fsc->add_adapter_routes(fs, &dev->routes);
}
static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->release_adapter_routes(fs, &dev->routes);
}
static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
                                              dev->routes.gsi[n]);
}
static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
                                                dev->routes.gsi[n]);
    assert(ret == 0);
}
static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                         bool assign, bool with_irqfd)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (assign) {
        int r = event_notifier_init(notifier, 0);

        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
        if (with_irqfd) {
            r = virtio_ccw_add_irqfd(dev, n);
            if (r) {
                virtio_queue_set_guest_notifier_fd_handler(vq, false,
                                                           with_irqfd);
                return r;
            }
        }
        /*
         * We do not support individual masking for channel devices, so we
         * need to manually trigger any guest masking callbacks here.
         */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, false);
        }
        /* get lost events and re-inject */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, true);
        }
        if (with_irqfd) {
            virtio_ccw_remove_irqfd(dev, n);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    return 0;
}
static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}
static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    qemu_put_be16(f, virtio_queue_vector(vdev, n));
}
static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    uint16_t vector;

    qemu_get_be16s(f, &vector);
    virtio_queue_set_vector(vdev, n, vector);

    return 0;
}
static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
}
static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
}
static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    if (dev->max_rev >= 1) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}
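
/*
 * Put differently: virtio-ccw only offers VIRTIO_F_VERSION_1 when the device
 * may negotiate revision 1 or higher (max_rev >= 1); with max_rev = 0 the
 * transport stays legacy-only and the feature bit is never advertised to the
 * guest.
 */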
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    SubchDev *sch = ccw_dev->sch;
    int n = virtio_get_num_queues(vdev);
    S390FLICState *flic = s390_get_flic();

    if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        dev->max_rev = 0;
    }

    if (!virtio_ccw_rev_max(dev) && !virtio_legacy_allowed(vdev)) {
        /*
         * To avoid migration issues, we allow legacy mode when legacy
         * check is disabled in the old machine types (< 5.1).
         */
        if (virtio_legacy_check_disabled(vdev)) {
            warn_report("device requires revision >= 1, but for backward "
                        "compatibility max_revision=0 is allowed");
        } else {
            error_setg(errp, "Invalid value of property max_rev "
                       "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
            return;
        }
    }

    if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds virtio limit %d", n,
                   VIRTIO_QUEUE_MAX);
        return;
    }
    if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds flic adapter route limit %d", n,
                   flic->adapter_routes_max_batch);
        return;
    }

    sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);

    css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
                          d->hotplugged, 1);
}
static void virtio_ccw_device_unplugged(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    virtio_ccw_stop_ioeventfd(dev);
}

/**************** Virtio-ccw Bus Device Descriptions *******************/
static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
    virtio_ccw_device_realize(_dev, errp);
}
static void virtio_ccw_busdev_unrealize(DeviceState *dev)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_device_unrealize(_dev);
}
static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);

    virtio_ccw_stop_ioeventfd(_dev);
}
static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);

    k->unplug = virtio_ccw_busdev_unplug;
    dc->realize = virtio_ccw_busdev_realize;
    dc->unrealize = virtio_ccw_busdev_unrealize;
    device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
}
static const TypeInfo virtio_ccw_device_info = {
    .name = TYPE_VIRTIO_CCW_DEVICE,
    .parent = TYPE_CCW_DEVICE,
    .instance_size = sizeof(VirtioCcwDevice),
    .class_init = virtio_ccw_device_class_init,
    .class_size = sizeof(VirtIOCCWDeviceClass),
    .abstract = true,
};
/* virtio-ccw-bus */

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_init(bus, bus_size, TYPE_VIRTIO_CCW_BUS, qdev, virtio_bus_name);
}
static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
{
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    BusClass *bus_class = BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_ccw_notify;
    k->vmstate_change = virtio_ccw_vmstate_change;
    k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
    k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
    k->save_queue = virtio_ccw_save_queue;
    k->load_queue = virtio_ccw_load_queue;
    k->save_config = virtio_ccw_save_config;
    k->load_config = virtio_ccw_load_config;
    k->pre_plugged = virtio_ccw_pre_plugged;
    k->device_plugged = virtio_ccw_device_plugged;
    k->device_unplugged = virtio_ccw_device_unplugged;
    k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}
static const TypeInfo virtio_ccw_bus_info = {
    .name = TYPE_VIRTIO_CCW_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioCcwBusState),
    .class_size = sizeof(VirtioCcwBusClass),
    .class_init = virtio_ccw_bus_class_init,
};
static void virtio_ccw_register(void)
{
    type_register_static(&virtio_ccw_bus_info);
    type_register_static(&virtio_ccw_device_info);
}

type_init(virtio_ccw_register)