/*
 * virtio ccw target implementation
 *
 * Copyright 2012,2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-net.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"

#include "hw/s390x/ioinst.h"
#include "hw/s390x/css.h"
#include "virtio-ccw.h"
#include "trace.h"
#include "hw/s390x/css-bridge.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "sysemu/replay.h"

#define NR_CLASSIC_INDICATOR_BITS 64

bool have_virtio_ccw = true;

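/*
 * Post-load fixup: re-attach the device to its subchannel, recompute the
 * adapter id for thin interrupts and re-fill the subchannel ids.
 */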
static int virtio_ccw_dev_post_load(void *opaque, int version_id)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);

    ccw_dev->sch->driver_data = dev;
    if (ccw_dev->sch->thinint_active) {
        dev->routes.adapter.adapter_id = css_get_adapter_id(
                                             CSS_IO_ADAPTER_VIRTIO,
                                             dev->thinint_isc);
    }
    /* Re-fill subch_id after loading the subchannel states.*/
    if (ck->refill_ids) {
        ck->refill_ids(ccw_dev);
    }

    return 0;
}

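/*
 * Temporary structure used via VMSTATE_WITH_TMP to migrate the
 * config_vector of the wrapped VirtIODevice.
 */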
typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;
    uint16_t config_vector;
} VirtioCcwDeviceTmp;

static int virtio_ccw_dev_tmp_pre_save(void *opaque)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    tmp->config_vector = vdev->config_vector;

    return 0;
}

static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    vdev->config_vector = tmp->config_vector;

    return 0;
}

const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev);

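/* Look up the VirtIODevice behind a subchannel, if any. */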
VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
{
    VirtIODevice *vdev = NULL;
    VirtioCcwDevice *dev = sch->driver_data;

    if (dev) {
        vdev = virtio_bus_get_device(&dev->bus);
    }
    return vdev;
}

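/* ioeventfd handling is simply delegated to the generic virtio bus code. */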
static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}

static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}

static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}

static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;

    return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}

/* Communication blocks used by several channel commands. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;
    uint32_t align;
    uint16_t index;
    uint16_t num;
} QEMU_PACKED VqInfoBlockLegacy;

typedef struct VqInfoBlock {
    uint64_t desc;
    uint32_t res0;
    uint16_t index;
    uint16_t num;
    uint64_t avail;
    uint64_t used;
} QEMU_PACKED VqInfoBlock;

typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max;
} QEMU_PACKED VqConfigBlock;

typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;
} QEMU_PACKED VirtioFeatDesc;

typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit;
    uint8_t isc;
} QEMU_PACKED VirtioThinintInfo;

typedef struct VirtioRevInfo {
    uint16_t revision;
    uint16_t length;
    uint8_t data[0];
} QEMU_PACKED VirtioRevInfo;

/* Specify where the virtqueues for the subchannel are in guest memory. */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number. */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}

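/*
 * Reset the virtio device and release all indicators; thin interrupts end up
 * deactivated on the subchannel.
 */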
static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    virtio_ccw_stop_ioeventfd(dev);
    virtio_reset(vdev);
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
    if (dev->indicators2) {
        release_indicator(&dev->routes.adapter, dev->indicators2);
        dev->indicators2 = NULL;
    }
    if (dev->summary_indicator) {
        release_indicator(&dev->routes.adapter, dev->summary_indicator);
        dev->summary_indicator = NULL;
    }
    ccw_dev->sch->thinint_active = false;
}

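/*
 * CCW_CMD_SET_VQ handler: read a legacy or virtio-1 queue info block from
 * the channel data stream and configure the virtqueue accordingly.
 */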
static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command. */
        return -EIO;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    if (is_legacy) {
        ret = ccw_dstream_read(&sch->cds, linfo);
        if (ret) {
            return ret;
        }
        linfo.queue = be64_to_cpu(linfo.queue);
        linfo.align = be32_to_cpu(linfo.align);
        linfo.index = be16_to_cpu(linfo.index);
        linfo.num = be16_to_cpu(linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ret = ccw_dstream_read(&sch->cds, info);
        if (ret) {
            return ret;
        }
        info.desc = be64_to_cpu(info.desc);
        info.index = be16_to_cpu(info.index);
        info.num = be16_to_cpu(info.num);
        info.avail = be64_to_cpu(info.avail);
        info.used = be64_to_cpu(info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    sch->curr_status.scsw.count = 0;
    return ret;
}

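/*
 * Main CCW callback for virtio subchannels: interpret a single channel
 * command and update the subchannel status accordingly.
 */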
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VirtioRevInfo revinfo;
    uint8_t status;
    VirtioFeatDesc features;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    bool check_len;
    int len;
    VirtioThinintInfo thinint;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (dev->revision < 0 && ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
        if (dev->force_revision_1) {
            /*
             * virtio-1 drivers must start with negotiating to a revision >= 1,
             * so post a command reject for all other commands
             */
            return -ENOSYS;
        }
        /*
         * If the driver issues any command that is not SET_VIRTIO_REV,
         * we'll have to operate the device in legacy mode.
         */
        dev->revision = 0;
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_ccw_reset_virtio(dev, vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EIO;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            ccw_dstream_advance(&sch->cds, sizeof(features.features));
            ret = ccw_dstream_read(&sch->cds, features.index);
            if (ret) {
                break;
            }
            if (features.index == 0) {
                if (dev->revision >= 1) {
                    /* Don't offer legacy features for modern devices. */
                    features.features = (uint32_t)
                        (vdev->host_features & ~vdc->legacy_features);
                } else {
                    features.features = (uint32_t)vdev->host_features;
                }
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * Only offer feature bits beyond 31 if the guest has
                 * negotiated at least revision 1.
                 */
                features.features = (uint32_t)(vdev->host_features >> 32);
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            ccw_dstream_rewind(&sch->cds);
            features.features = cpu_to_le32(features.features);
            ret = ccw_dstream_write(&sch->cds, features.features);
            if (!ret) {
                sch->curr_status.scsw.count = ccw.count - sizeof(features);
            }
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EIO;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, features);
            if (ret) {
                break;
            }
            features.features = le32_to_cpu(features.features);
            if (features.index == 0) {
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0xffffffff00000000ULL) |
                                    features.features);
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * If the guest did not negotiate at least revision 1,
                 * we did not offer it any feature bits beyond 31. Such a
                 * guest passing us any bit here is therefore buggy.
                 */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0x00000000ffffffffULL) |
                                    ((uint64_t)features.features << 32));
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Guest bug: features[%i]=%x (expected 0)",
                                  features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
            ret = ccw_dstream_write_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                virtio_bus_set_vdev_config(&dev->bus, vdev->config);
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_READ_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EIO;
            break;
        }
        address_space_stb(&address_space_memory, ccw.cda, vdev->status,
                          MEMTXATTRS_UNSPECIFIED, NULL);
        sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
        ret = 0;
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EIO;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, status);
            if (ret) {
                break;
            }
            if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                virtio_ccw_stop_ioeventfd(dev);
            }
            if (virtio_set_status(vdev, status) == 0) {
                if (vdev->status == 0) {
                    virtio_ccw_reset_virtio(dev, vdev);
                }
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                    virtio_ccw_start_ioeventfd(dev);
                }
                sch->curr_status.scsw.count = ccw.count - sizeof(status);
                ret = 0;
            } else {
                /* Trigger a command reject. */
                ret = -ENOSYS;
            }
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EIO;
            break;
        }
        if (sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
            break;
        }
        if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
            /* More queues than indicator bits --> trigger a reject */
            ret = -ENOSYS;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, indicators);
            if (ret) {
                break;
            }
            indicators = be64_to_cpu(indicators);
            dev->indicators = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EIO;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, indicators);
            if (ret) {
                break;
            }
            indicators = be64_to_cpu(indicators);
            dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EIO;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read(&sch->cds, vq_config.index);
            if (ret) {
                break;
            }
            vq_config.index = be16_to_cpu(vq_config.index);
            if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                ret = -EINVAL;
                break;
            }
            vq_config.num_max = virtio_queue_get_num(vdev,
                                                     vq_config.index);
            vq_config.num_max = cpu_to_be16(vq_config.num_max);
            ret = ccw_dstream_write(&sch->cds, vq_config.num_max);
            if (!ret) {
                sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            }
        }
        break;
    case CCW_CMD_SET_IND_ADAPTER:
        if (check_len) {
            if (ccw.count != sizeof(thinint)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(thinint)) {
            /* Can't execute command. */
            ret = -EIO;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else if (dev->indicators && !sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
        } else {
            if (ccw_dstream_read(&sch->cds, thinint)) {
                ret = -EFAULT;
            } else {
                thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
                thinint.summary_indicator =
                    be64_to_cpu(thinint.summary_indicator);
                thinint.device_indicator =
                    be64_to_cpu(thinint.device_indicator);

                dev->summary_indicator =
                    get_indicator(thinint.summary_indicator, sizeof(uint8_t));
                dev->indicators =
                    get_indicator(thinint.device_indicator,
                                  thinint.ind_bit / 8 + 1);
                dev->thinint_isc = thinint.isc;
                dev->routes.adapter.ind_offset = thinint.ind_bit;
                dev->routes.adapter.summary_offset = 7;
                dev->routes.adapter.adapter_id = css_get_adapter_id(
                                                 CSS_IO_ADAPTER_VIRTIO,
                                                 dev->thinint_isc);
                sch->thinint_active = ((dev->indicators != NULL) &&
                                       (dev->summary_indicator != NULL));
                sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
                ret = 0;
            }
        }
        break;
    case CCW_CMD_SET_VIRTIO_REV:
        len = sizeof(revinfo);
        if (ccw.count < len) {
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
            break;
        }
        ret = ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
        if (ret < 0) {
            break;
        }
        revinfo.revision = be16_to_cpu(revinfo.revision);
        revinfo.length = be16_to_cpu(revinfo.length);
        if (ccw.count < len + revinfo.length ||
            (check_len && ccw.count > len + revinfo.length)) {
            ret = -EINVAL;
            break;
        }
        /*
         * Once we start to support revisions with additional data, we'll
         * need to fetch it here. Nothing to do for now, though.
         */
        if (dev->revision >= 0 ||
            revinfo.revision > virtio_ccw_rev_max(dev) ||
            (dev->force_revision_1 && !revinfo.revision)) {
            ret = -ENOSYS;
            break;
        }
        ret = 0;
        dev->revision = revinfo.revision;
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}

static void virtio_sch_disable_cb(SubchDev *sch)
{
    VirtioCcwDevice *dev = sch->driver_data;

    dev->revision = -1;
}

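/*
 * Create the virtual subchannel for a new virtio-ccw device and wire it up,
 * then run the device-specific and CCW parent realize hooks.
 */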
static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;
    int i;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        goto out_err;
    }

    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    sch->irb_cb = build_irb_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    dev->revision = -1;
    for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) {
        dev->routes.gsi[i] = -1;
    }
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ? "user-configured" : "auto-configured");

    if (kvm_enabled() && !kvm_eventfds_enabled()) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    if (k->realize) {
        k->realize(dev, &err);
        if (err) {
            goto out_err;
        }
    }

    ck->realize(ccw_dev, &err);
    if (err) {
        goto out_err;
    }

    return;

out_err:
    error_propagate(errp, err);
    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
    ccw_dev->sch = NULL;
    g_free(sch);
}

static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev)
{
    VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;

    if (dc->unrealize) {
        dc->unrealize(dev);
    }

    if (sch) {
        css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
        g_free(sch);
        ccw_dev->sch = NULL;
    }
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
}

/* DeviceState to VirtioCcwDevice. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
{
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);

    return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
}

/*
 * Atomically OR to_be_set into the guest indicator byte at ind_loc, using a
 * compare-and-swap loop so concurrent updates are not lost.
 */
static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
                                     uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        error_report("%s(%x.%x.%04x): unable to access indicator",
                     __func__, sch->cssid, sch->ssid, sch->schid);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}

static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
             */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        if (!dev->indicators2) {
            return;
        }
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}

static void virtio_ccw_reset(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);

    virtio_ccw_reset_virtio(dev, vdev);
    if (vdc->parent_reset) {
        vdc->parent_reset(d);
    }
}

static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    if (running) {
        virtio_ccw_start_ioeventfd(dev);
    } else {
        virtio_ccw_stop_ioeventfd(dev);
    }
}

static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
{
    CcwDevice *dev = CCW_DEVICE(d);

    return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
}

static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
{
    int r;
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    if (!ccw_dev->sch->thinint_active) {
        return -EINVAL;
    }

    r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
    if (r) {
        return r;
    }
    r = map_indicator(&dev->routes.adapter, dev->indicators);
    if (r) {
        return r;
    }
    dev->routes.adapter.summary_addr = dev->summary_indicator->map;
    dev->routes.adapter.ind_addr = dev->indicators->map;

    return 0;
}

static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    int ret;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    int i;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    ret = virtio_ccw_get_mappings(dev);
    if (ret) {
        return ret;
    }
    for (i = 0; i < nvqs; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }
    dev->routes.num_routes = i;
    return fsc->add_adapter_routes(fs, &dev->routes);
}

static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->release_adapter_routes(fs, &dev->routes);
}

static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
                                              dev->routes.gsi[n]);
}

static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
                                                dev->routes.gsi[n]);
    assert(ret == 0);
}

static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                         bool assign, bool with_irqfd)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (assign) {
        int r = event_notifier_init(notifier, 0);

        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
        if (with_irqfd) {
            r = virtio_ccw_add_irqfd(dev, n);
            if (r) {
                virtio_queue_set_guest_notifier_fd_handler(vq, false,
                                                           with_irqfd);
                return r;
            }
        }
        /*
         * We do not support individual masking for channel devices, so we
         * need to manually trigger any guest masking callbacks here.
         */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, false);
        }
        /* get lost events and re-inject */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, true);
        }
        if (with_irqfd) {
            virtio_ccw_remove_irqfd(dev, n);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    return 0;
}

static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}

static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    uint16_t vector;

    qemu_get_be16s(f, &vector);
    virtio_queue_set_vector(vdev, n, vector);

    return 0;
}

static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
}

static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
}

static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    if (dev->max_rev >= 1) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    SubchDev *sch = ccw_dev->sch;
    int n = virtio_get_num_queues(vdev);
    S390FLICState *flic = s390_get_flic();

    if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        dev->max_rev = 0;
    }

    if (!virtio_ccw_rev_max(dev) && !virtio_legacy_allowed(vdev)) {
        /*
         * To avoid migration issues, we allow legacy mode when legacy
         * check is disabled in the old machine types (< 5.1).
         */
        if (virtio_legacy_check_disabled(vdev)) {
            warn_report("device requires revision >= 1, but for backward "
                        "compatibility max_revision=0 is allowed");
        } else {
            error_setg(errp, "Invalid value of property max_rev "
                       "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
            return;
        }
    }

    if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds virtio limit %d", n,
                   VIRTIO_QUEUE_MAX);
        return;
    }
    if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds flic adapter route limit %d", n,
                   flic->adapter_routes_max_batch);
        return;
    }

    sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);

    css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
                          d->hotplugged, 1);
}

static void virtio_ccw_device_unplugged(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    virtio_ccw_stop_ioeventfd(dev);
}

/**************** Virtio-ccw Bus Device Descriptions *******************/

static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
    virtio_ccw_device_realize(_dev, errp);
}

static void virtio_ccw_busdev_unrealize(DeviceState *dev)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_device_unrealize(_dev);
}

static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);

    virtio_ccw_stop_ioeventfd(_dev);
}

static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);

    k->unplug = virtio_ccw_busdev_unplug;
    dc->realize = virtio_ccw_busdev_realize;
    dc->unrealize = virtio_ccw_busdev_unrealize;
    device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
}

static const TypeInfo virtio_ccw_device_info = {
    .name = TYPE_VIRTIO_CCW_DEVICE,
    .parent = TYPE_CCW_DEVICE,
    .instance_size = sizeof(VirtioCcwDevice),
    .class_init = virtio_ccw_device_class_init,
    .class_size = sizeof(VirtIOCCWDeviceClass),
    .abstract = true,
};

/* virtio-ccw-bus */

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_init(bus, bus_size, TYPE_VIRTIO_CCW_BUS, qdev, virtio_bus_name);
}

static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
{
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    BusClass *bus_class = BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_ccw_notify;
    k->vmstate_change = virtio_ccw_vmstate_change;
    k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
    k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
    k->save_queue = virtio_ccw_save_queue;
    k->load_queue = virtio_ccw_load_queue;
    k->save_config = virtio_ccw_save_config;
    k->load_config = virtio_ccw_load_config;
    k->pre_plugged = virtio_ccw_pre_plugged;
    k->device_plugged = virtio_ccw_device_plugged;
    k->device_unplugged = virtio_ccw_device_unplugged;
    k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}

static const TypeInfo virtio_ccw_bus_info = {
    .name = TYPE_VIRTIO_CCW_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioCcwBusState),
    .class_size = sizeof(VirtioCcwBusClass),
    .class_init = virtio_ccw_bus_class_init,
};

static void virtio_ccw_register(void)
{
    type_register_static(&virtio_ccw_bus_info);
    type_register_static(&virtio_ccw_device_info);
}

type_init(virtio_ccw_register)