/*
 * virtio ccw target implementation
 *
 * Copyright 2012,2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-net.h"
#include "hw/sysbus.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"

#include "hw/s390x/ioinst.h"
#include "hw/s390x/css.h"
#include "virtio-ccw.h"
#include "trace.h" /* trace_virtio_ccw_* events used below */
#include "hw/s390x/css-bridge.h"
#include "hw/s390x/s390-virtio-ccw.h"

#define NR_CLASSIC_INDICATOR_BITS 64

static int virtio_ccw_dev_post_load(void *opaque, int version_id)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);

    ccw_dev->sch->driver_data = dev;
    if (ccw_dev->sch->thinint_active) {
        dev->routes.adapter.adapter_id = css_get_adapter_id(
                                             CSS_IO_ADAPTER_VIRTIO,
                                             dev->thinint_isc);
    }
    /* Re-fill subch_id after loading the subchannel states. */
    if (ck->refill_ids) {
        ck->refill_ids(ccw_dev);
    }

    return 0;
}

typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;
    uint16_t config_vector;
} VirtioCcwDeviceTmp;

static int virtio_ccw_dev_tmp_pre_save(void *opaque)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    tmp->config_vector = vdev->config_vector;

    return 0;
}

static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    vdev->config_vector = tmp->config_vector;

    return 0;
}

const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};
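
/*
 * Note on the VMSTATE_WITH_TMP entry above: during migration the core
 * instantiates a VirtioCcwDeviceTmp, points its parent field at this
 * device, and runs the pre_save/post_load hooks of
 * vmstate_virtio_ccw_dev_tmp around its field list. That is how the
 * VirtIODevice's config_vector travels together with this device's state.
 */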

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev);

VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
{
    VirtIODevice *vdev = NULL;
    VirtioCcwDevice *dev = sch->driver_data;

    if (dev) {
        vdev = virtio_bus_get_device(&dev->bus);
    }
    return vdev;
}

static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}

static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}

static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}

static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;

    return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}
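
/*
 * Illustration of the sch_id layout used above (example values, not from
 * the original source): the halfword returned by css_build_subchannel_id()
 * lands in the upper 16 bits and the subchannel number in the lower 16
 * bits, so a subchannel id of 0x0001 and a schid of 0x002a yield
 * sch_id == 0x0001002a.
 */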

/* Communication blocks used by several channel commands. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;
    uint32_t align;
    uint16_t index;
    uint16_t num;
} QEMU_PACKED VqInfoBlockLegacy;

typedef struct VqInfoBlock {
    uint64_t desc;
    uint32_t res0;
    uint16_t index;
    uint16_t num;
    uint64_t avail;
    uint64_t used;
} QEMU_PACKED VqInfoBlock;

typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max;
} QEMU_PACKED VqConfigBlock;

typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;
} QEMU_PACKED VirtioFeatDesc;

typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit;
    uint8_t isc;
} QEMU_PACKED VirtioThinintInfo;

typedef struct VirtioRevInfo {
    uint16_t revision;
    uint16_t length;
    uint8_t data[0];
} QEMU_PACKED VirtioRevInfo;
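
/*
 * Note: the guest fills these blocks in big-endian byte order as part of
 * its channel program; the handlers below read them via ccw_dstream_read()
 * and convert each field with be16/be32/be64_to_cpu() before use.
 */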

/* Specify where the virtqueues for the subchannel are in guest memory. */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number. */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}

static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    virtio_ccw_stop_ioeventfd(dev);
    virtio_reset(vdev);
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
    if (dev->indicators2) {
        release_indicator(&dev->routes.adapter, dev->indicators2);
        dev->indicators2 = NULL;
    }
    if (dev->summary_indicator) {
        release_indicator(&dev->routes.adapter, dev->summary_indicator);
        dev->summary_indicator = NULL;
    }
    ccw_dev->sch->thinint_active = false;
}

static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command. */
        return -EINVAL;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    if (is_legacy) {
        ccw_dstream_read(&sch->cds, linfo);
        linfo.queue = be64_to_cpu(linfo.queue);
        linfo.align = be32_to_cpu(linfo.align);
        linfo.index = be16_to_cpu(linfo.index);
        linfo.num = be16_to_cpu(linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ccw_dstream_read(&sch->cds, info);
        info.desc = be64_to_cpu(info.desc);
        info.index = be16_to_cpu(info.index);
        info.num = be16_to_cpu(info.num);
        info.avail = be64_to_cpu(info.avail);
        info.used = be64_to_cpu(info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    sch->curr_status.scsw.count = 0;

    return ret;
}

static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VirtioRevInfo revinfo;
    uint8_t status;
    VirtioFeatDesc features;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    bool check_len;
    int len;
    VirtioThinintInfo thinint;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (dev->force_revision_1 && dev->revision < 0 &&
        ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
        /*
         * virtio-1 drivers must start with negotiating to a revision >= 1,
         * so post a command reject for all other commands
         */
        return -ENOSYS;
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_ccw_reset_virtio(dev, vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            ccw_dstream_advance(&sch->cds, sizeof(features.features));
            ccw_dstream_read(&sch->cds, features.index);
            if (features.index == 0) {
                if (dev->revision >= 1) {
                    /* Don't offer legacy features for modern devices. */
                    features.features = (uint32_t)
                        (vdev->host_features & ~vdc->legacy_features);
                } else {
                    features.features = (uint32_t)vdev->host_features;
                }
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * Only offer feature bits beyond 31 if the guest has
                 * negotiated at least revision 1.
                 */
                features.features = (uint32_t)(vdev->host_features >> 32);
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            ccw_dstream_rewind(&sch->cds);
            features.features = cpu_to_le32(features.features);
            ccw_dstream_write(&sch->cds, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, features);
            features.features = le32_to_cpu(features.features);
            if (features.index == 0) {
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0xffffffff00000000ULL) |
                                    features.features);
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * If the guest did not negotiate at least revision 1,
                 * we did not offer it any feature bits beyond 31. Such a
                 * guest passing us any bit here is therefore buggy.
                 */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0x00000000ffffffffULL) |
                                    ((uint64_t)features.features << 32));
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Guest bug: features[%i]=%x (expected 0)",
                                  features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
            ccw_dstream_write_buf(&sch->cds, vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                virtio_bus_set_vdev_config(&dev->bus, vdev->config);
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_READ_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            address_space_stb(&address_space_memory, ccw.cda, vdev->status,
                              MEMTXATTRS_UNSPECIFIED, NULL);
            sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, status);
            if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                virtio_ccw_stop_ioeventfd(dev);
            }
            if (virtio_set_status(vdev, status) == 0) {
                if (vdev->status == 0) {
                    virtio_ccw_reset_virtio(dev, vdev);
                }
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                    virtio_ccw_start_ioeventfd(dev);
                }
                sch->curr_status.scsw.count = ccw.count - sizeof(status);
                ret = 0;
            } else {
                /* Trigger a command reject. */
                ret = -ENOSYS;
            }
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
            break;
        }
        if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
            /* More queues than indicator bits --> trigger a reject */
            ret = -ENOSYS;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, vq_config.index);
            vq_config.index = be16_to_cpu(vq_config.index);
            if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                ret = -EINVAL;
                break;
            }
            vq_config.num_max = virtio_queue_get_num(vdev,
                                                     vq_config.index);
            vq_config.num_max = cpu_to_be16(vq_config.num_max);
            ccw_dstream_write(&sch->cds, vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_IND_ADAPTER:
        if (check_len) {
            if (ccw.count != sizeof(thinint)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(thinint)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else if (dev->indicators && !sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
        } else {
            if (ccw_dstream_read(&sch->cds, thinint)) {
                ret = -EFAULT;
            } else {
                thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
                thinint.summary_indicator =
                    be64_to_cpu(thinint.summary_indicator);
                thinint.device_indicator =
                    be64_to_cpu(thinint.device_indicator);

                dev->summary_indicator =
                    get_indicator(thinint.summary_indicator, sizeof(uint8_t));
                dev->indicators =
                    get_indicator(thinint.device_indicator,
                                  thinint.ind_bit / 8 + 1);
                dev->thinint_isc = thinint.isc;
                dev->routes.adapter.ind_offset = thinint.ind_bit;
                dev->routes.adapter.summary_offset = 7;
                dev->routes.adapter.adapter_id = css_get_adapter_id(
                                                     CSS_IO_ADAPTER_VIRTIO,
                                                     dev->thinint_isc);
                sch->thinint_active = ((dev->indicators != NULL) &&
                                       (dev->summary_indicator != NULL));
                sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
                ret = 0;
            }
        }
        break;
    case CCW_CMD_SET_VIRTIO_REV:
        len = sizeof(revinfo);
        if (ccw.count < len) {
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
            break;
        }
        ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
        revinfo.revision = be16_to_cpu(revinfo.revision);
        revinfo.length = be16_to_cpu(revinfo.length);
        if (ccw.count < len + revinfo.length ||
            (check_len && ccw.count > len + revinfo.length)) {
            ret = -EINVAL;
            break;
        }
        /*
         * Once we start to support revisions with additional data, we'll
         * need to fetch it here. Nothing to do for now, though.
         */
        if (dev->revision >= 0 ||
            revinfo.revision > virtio_ccw_rev_max(dev) ||
            (dev->force_revision_1 && !revinfo.revision)) {
            ret = -ENOSYS;
            break;
        }
        ret = 0;
        dev->revision = revinfo.revision;
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}

static void virtio_sch_disable_cb(SubchDev *sch)
{
    VirtioCcwDevice *dev = sch->driver_data;

    dev->revision = -1;
}

static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        error_propagate(errp, err);
        return;
    }

    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    dev->revision = -1;
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ? "user-configured" : "auto-configured");

    if (kvm_enabled() && !kvm_eventfds_enabled()) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    if (k->realize) {
        k->realize(dev, &err);
        if (err) {
            goto out_err;
        }
    }

    ck->realize(ccw_dev, &err);
    if (err) {
        goto out_err;
    }

    return;

out_err:
    error_propagate(errp, err);
    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
    ccw_dev->sch = NULL;
    g_free(sch);
}

static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;

    if (dc->unrealize) {
        dc->unrealize(dev, errp);
    }

    if (sch) {
        css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
        g_free(sch);
        ccw_dev->sch = NULL;
    }
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
}

/* DeviceState to VirtioCcwDevice. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
{
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);

    return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
}

static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
                                     uint8_t to_be_set)
{
    uint8_t ind_old, ind_new;
    hwaddr len = 1;
    uint8_t *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, 1);
    if (!ind_addr) {
        error_report("%s(%x.%x.%04x): unable to access indicator",
                     __func__, sch->cssid, sch->ssid, sch->schid);
        return -1;
    }
    do {
        ind_old = *ind_addr;
        ind_new = ind_old | to_be_set;
    } while (atomic_cmpxchg(ind_addr, ind_old, ind_new) != ind_old);
    trace_virtio_ccw_set_ind(ind_loc, ind_old, ind_new);
    cpu_physical_memory_unmap(ind_addr, len, 1, len);

    return ind_old;
}

static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
             */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        if (!dev->indicators2) {
            return;
        }
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}
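
/*
 * Worked example for the adapter-indicator path above (illustrative values,
 * not from the original source): with ind_offset (ind_bit) = 0 and
 * vector = 9, the queue notification touches byte (0 + 9) / 8 = 1 of the
 * indicator area and ORs in 0x80 >> ((0 + 9) % 8) = 0x40, i.e. bit 9 when
 * bits are counted big endian from the start of the area. The summary
 * indicator byte is then set, and an adapter interrupt is injected for
 * dev->thinint_isc only if the summary byte was previously clear.
 */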

static void virtio_ccw_reset(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);

    virtio_ccw_reset_virtio(dev, vdev);
    if (vdc->parent_reset) {
        vdc->parent_reset(d);
    }
}

static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    if (running) {
        virtio_ccw_start_ioeventfd(dev);
    } else {
        virtio_ccw_stop_ioeventfd(dev);
    }
}

static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
{
    CcwDevice *dev = CCW_DEVICE(d);

    return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
}

static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
{
    int r;
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    if (!ccw_dev->sch->thinint_active) {
        return -EINVAL;
    }

    r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
    if (r) {
        return r;
    }
    r = map_indicator(&dev->routes.adapter, dev->indicators);
    if (r) {
        return r;
    }
    dev->routes.adapter.summary_addr = dev->summary_indicator->map;
    dev->routes.adapter.ind_addr = dev->indicators->map;

    return 0;
}

static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    int i;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    ret = virtio_ccw_get_mappings(dev);
    if (ret) {
        return ret;
    }
    for (i = 0; i < nvqs; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }
    dev->routes.num_routes = i;
    return fsc->add_adapter_routes(fs, &dev->routes);
}

static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->release_adapter_routes(fs, &dev->routes);
}

static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
                                              dev->routes.gsi[n]);
}

static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
                                                dev->routes.gsi[n]);
    assert(ret == 0);
}

static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                         bool assign, bool with_irqfd)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (assign) {
        int r = event_notifier_init(notifier, 0);

        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
        if (with_irqfd) {
            r = virtio_ccw_add_irqfd(dev, n);
            if (r) {
                virtio_queue_set_guest_notifier_fd_handler(vq, false,
                                                           with_irqfd);
                return r;
            }
        }
        /*
         * We do not support individual masking for channel devices, so we
         * need to manually trigger any guest masking callbacks here.
         */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, false);
        }
        /* get lost events and re-inject */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, true);
        }
        if (with_irqfd) {
            virtio_ccw_remove_irqfd(dev, n);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    return 0;
}

static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}

static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    uint16_t vector;

    qemu_get_be16s(f, &vector);
    virtio_queue_set_vector(vdev, n, vector);

    return 0;
}

static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
}

static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
}

static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    if (dev->max_rev >= 1) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    SubchDev *sch = ccw_dev->sch;
    int n = virtio_get_num_queues(vdev);
    S390FLICState *flic = s390_get_flic();

    if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        dev->max_rev = 0;
    }

    if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds virtio limit %d", n,
                   VIRTIO_QUEUE_MAX);
        return;
    }
    if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds flic adapter route limit %d", n,
                   flic->adapter_routes_max_batch);
        return;
    }

    sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);

    css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
                          d->hotplugged, 1);
}

static void virtio_ccw_device_unplugged(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    virtio_ccw_stop_ioeventfd(dev);
}

/**************** Virtio-ccw Bus Device Descriptions *******************/

static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
    virtio_ccw_device_realize(_dev, errp);
}

static void virtio_ccw_busdev_unrealize(DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_device_unrealize(_dev, errp);
}

static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);

    virtio_ccw_stop_ioeventfd(_dev);
}

static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);

    k->unplug = virtio_ccw_busdev_unplug;
    dc->realize = virtio_ccw_busdev_realize;
    dc->unrealize = virtio_ccw_busdev_unrealize;
    dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
    device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
}

static const TypeInfo virtio_ccw_device_info = {
    .name = TYPE_VIRTIO_CCW_DEVICE,
    .parent = TYPE_CCW_DEVICE,
    .instance_size = sizeof(VirtioCcwDevice),
    .class_init = virtio_ccw_device_class_init,
    .class_size = sizeof(VirtIOCCWDeviceClass),
    .abstract = true,
};

/* virtio-ccw-bus */

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS,
                        qdev, virtio_bus_name);
}

static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
{
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    BusClass *bus_class = BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_ccw_notify;
    k->vmstate_change = virtio_ccw_vmstate_change;
    k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
    k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
    k->save_queue = virtio_ccw_save_queue;
    k->load_queue = virtio_ccw_load_queue;
    k->save_config = virtio_ccw_save_config;
    k->load_config = virtio_ccw_load_config;
    k->pre_plugged = virtio_ccw_pre_plugged;
    k->device_plugged = virtio_ccw_device_plugged;
    k->device_unplugged = virtio_ccw_device_unplugged;
    k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}

static const TypeInfo virtio_ccw_bus_info = {
    .name = TYPE_VIRTIO_CCW_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioCcwBusState),
    .class_init = virtio_ccw_bus_class_init,
};

static void virtio_ccw_register(void)
{
    type_register_static(&virtio_ccw_bus_info);
    type_register_static(&virtio_ccw_device_info);
}

type_init(virtio_ccw_register)