s390x/css: update css_adapter_interrupt
hw/s390x/css.c (qemu/ar7.git, blob 1aed89fd85255a5b19d6ee0e8ee342c995b31088)
/*
 * Channel subsystem base support.
 *
 * Copyright 2012 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/qdev.h"
#include "qemu/error-report.h"
#include "qemu/bitops.h"
#include "exec/address-spaces.h"
#include "cpu.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/css.h"
#include "trace.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/s390-virtio-ccw.h"
typedef struct CrwContainer {
    CRW crw;
    QTAILQ_ENTRY(CrwContainer) sibling;
} CrwContainer;

typedef struct ChpInfo {
    uint8_t in_use;
    uint8_t type;
    uint8_t is_virtual;
} ChpInfo;

typedef struct SubchSet {
    SubchDev *sch[MAX_SCHID + 1];
    unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)];
    unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)];
} SubchSet;

static const VMStateDescription vmstate_scsw = {
    .name = "s390_scsw",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(flags, SCSW),
        VMSTATE_UINT16(ctrl, SCSW),
        VMSTATE_UINT32(cpa, SCSW),
        VMSTATE_UINT8(dstat, SCSW),
        VMSTATE_UINT8(cstat, SCSW),
        VMSTATE_UINT16(count, SCSW),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pmcw = {
    .name = "s390_pmcw",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(intparm, PMCW),
        VMSTATE_UINT16(flags, PMCW),
        VMSTATE_UINT16(devno, PMCW),
        VMSTATE_UINT8(lpm, PMCW),
        VMSTATE_UINT8(pnom, PMCW),
        VMSTATE_UINT8(lpum, PMCW),
        VMSTATE_UINT8(pim, PMCW),
        VMSTATE_UINT16(mbi, PMCW),
        VMSTATE_UINT8(pom, PMCW),
        VMSTATE_UINT8(pam, PMCW),
        VMSTATE_UINT8_ARRAY(chpid, PMCW, 8),
        VMSTATE_UINT32(chars, PMCW),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_schib = {
    .name = "s390_schib",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(pmcw, SCHIB, 0, vmstate_pmcw, PMCW),
        VMSTATE_STRUCT(scsw, SCHIB, 0, vmstate_scsw, SCSW),
        VMSTATE_UINT64(mba, SCHIB),
        VMSTATE_UINT8_ARRAY(mda, SCHIB, 4),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ccw1 = {
    .name = "s390_ccw1",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd_code, CCW1),
        VMSTATE_UINT8(flags, CCW1),
        VMSTATE_UINT16(count, CCW1),
        VMSTATE_UINT32(cda, CCW1),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ciw = {
    .name = "s390_ciw",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(type, CIW),
        VMSTATE_UINT8(command, CIW),
        VMSTATE_UINT16(count, CIW),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_sense_id = {
    .name = "s390_sense_id",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(reserved, SenseId),
        VMSTATE_UINT16(cu_type, SenseId),
        VMSTATE_UINT8(cu_model, SenseId),
        VMSTATE_UINT16(dev_type, SenseId),
        VMSTATE_UINT8(dev_model, SenseId),
        VMSTATE_UINT8(unused, SenseId),
        VMSTATE_STRUCT_ARRAY(ciw, SenseId, MAX_CIWS, 0, vmstate_ciw, CIW),
        VMSTATE_END_OF_LIST()
    }
};

static int subch_dev_post_load(void *opaque, int version_id);
static void subch_dev_pre_save(void *opaque);

const char err_hint_devno[] = "Devno mismatch, tried to load wrong section!"
    " Likely reason: some sequences of plug and unplug can break"
    " migration for machine versions prior to 2.7 (known design flaw).";

const VMStateDescription vmstate_subch_dev = {
    .name = "s390_subch_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = subch_dev_post_load,
    .pre_save = subch_dev_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_EQUAL(cssid, SubchDev, "Bug!"),
        VMSTATE_UINT8_EQUAL(ssid, SubchDev, "Bug!"),
        VMSTATE_UINT16(migrated_schid, SubchDev),
        VMSTATE_UINT16_EQUAL(devno, SubchDev, err_hint_devno),
        VMSTATE_BOOL(thinint_active, SubchDev),
        VMSTATE_STRUCT(curr_status, SubchDev, 0, vmstate_schib, SCHIB),
        VMSTATE_UINT8_ARRAY(sense_data, SubchDev, 32),
        VMSTATE_UINT64(channel_prog, SubchDev),
        VMSTATE_STRUCT(last_cmd, SubchDev, 0, vmstate_ccw1, CCW1),
        VMSTATE_BOOL(last_cmd_valid, SubchDev),
        VMSTATE_STRUCT(id, SubchDev, 0, vmstate_sense_id, SenseId),
        VMSTATE_BOOL(ccw_fmt_1, SubchDev),
        VMSTATE_UINT8(ccw_no_data_cnt, SubchDev),
        VMSTATE_END_OF_LIST()
    }
};
typedef struct IndAddrPtrTmp {
    IndAddr **parent;
    uint64_t addr;
    int32_t len;
} IndAddrPtrTmp;

static int post_load_ind_addr(void *opaque, int version_id)
{
    IndAddrPtrTmp *ptmp = opaque;
    IndAddr **ind_addr = ptmp->parent;

    if (ptmp->len != 0) {
        *ind_addr = get_indicator(ptmp->addr, ptmp->len);
    } else {
        *ind_addr = NULL;
    }
    return 0;
}

static void pre_save_ind_addr(void *opaque)
{
    IndAddrPtrTmp *ptmp = opaque;
    IndAddr *ind_addr = *(ptmp->parent);

    if (ind_addr != NULL) {
        ptmp->len = ind_addr->len;
        ptmp->addr = ind_addr->addr;
    } else {
        ptmp->len = 0;
        ptmp->addr = 0L;
    }
}

const VMStateDescription vmstate_ind_addr_tmp = {
    .name = "s390_ind_addr_tmp",
    .pre_save = pre_save_ind_addr,
    .post_load = post_load_ind_addr,

    .fields = (VMStateField[]) {
        VMSTATE_INT32(len, IndAddrPtrTmp),
        VMSTATE_UINT64(addr, IndAddrPtrTmp),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_ind_addr = {
    .name = "s390_ind_addr_tmp",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(IndAddr*, IndAddrPtrTmp, vmstate_ind_addr_tmp),
        VMSTATE_END_OF_LIST()
    }
};
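
/*
 * How the two descriptions above cooperate (illustrative sketch): an
 * IndAddr pointer is migrated by round-tripping it through the flat
 * IndAddrPtrTmp. On save, pre_save_ind_addr() copies addr/len out of
 * the live object (0/0 for NULL); on load, post_load_ind_addr()
 * re-resolves them via get_indicator(). A device would embed it
 * roughly like this (hypothetical field/struct names):
 *
 *     VMSTATE_STRUCT(ind_field, MyCcwDeviceState, 1,
 *                    vmstate_ind_addr, IndAddr *)
 *
 * so only the (addr, len) pair ever hits the wire, never the pointer.
 */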
typedef struct CssImage {
    SubchSet *sch_set[MAX_SSID + 1];
    ChpInfo chpids[MAX_CHPID + 1];
} CssImage;

typedef struct IoAdapter {
    uint32_t id;
    uint8_t type;
    uint8_t isc;
    uint8_t flags;
} IoAdapter;

typedef struct ChannelSubSys {
    QTAILQ_HEAD(, CrwContainer) pending_crws;
    bool sei_pending;
    bool do_crw_mchk;
    bool crws_lost;
    uint8_t max_cssid;
    uint8_t max_ssid;
    bool chnmon_active;
    uint64_t chnmon_area;
    CssImage *css[MAX_CSSID + 1];
    uint8_t default_cssid;
    IoAdapter *io_adapters[CSS_IO_ADAPTER_TYPE_NUMS][MAX_ISC + 1];
    QTAILQ_HEAD(, IndAddr) indicator_addresses;
} ChannelSubSys;
static ChannelSubSys channel_subsys = {
    .pending_crws = QTAILQ_HEAD_INITIALIZER(channel_subsys.pending_crws),
    .do_crw_mchk = true,
    .sei_pending = false,
    .crws_lost = false,
    .chnmon_active = false,
    .indicator_addresses =
        QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses),
};
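
/*
 * Designated-initializer note (illustrative sketch, not from this
 * file): fields omitted above (max_cssid, max_ssid, css[], ...) are
 * zero-initialized, and if a field is named twice the last
 * initializer silently wins:
 *
 *     struct s { int a; int b; };
 *     struct s x = { .a = 1, .b = 2, .a = 3 };   // x.a == 3, no error
 *
 * GCC diagnoses duplicates only with -Woverride-init (enabled by
 * -Wextra); Clang uses -Winitializer-overrides.
 */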
static void subch_dev_pre_save(void *opaque)
{
    SubchDev *s = opaque;

    /* Prepare remote_schid for save */
    s->migrated_schid = s->schid;
}

static int subch_dev_post_load(void *opaque, int version_id)
{
    SubchDev *s = opaque;

    /* Re-assign the subchannel to remote_schid if necessary */
    if (s->migrated_schid != s->schid) {
        if (css_find_subch(true, s->cssid, s->ssid, s->schid) == s) {
            /*
             * Cleanup the slot before moving to s->migrated_schid provided
             * it still belongs to us, i.e. it was not changed by previous
             * invocation of this function.
             */
            css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, NULL);
        }
        /* It's OK to re-assign without a prior de-assign. */
        s->schid = s->migrated_schid;
        css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, s);
    }

    /*
     * Hack alert. If we don't migrate the channel subsystem status
     * we still need to find out if the guest enabled mss/mcss-e.
     * If the subchannel is enabled, it certainly was able to access it,
     * so adjust the max_ssid/max_cssid values for relevant ssid/cssid
     * values. This is not watertight, but better than nothing.
     */
    if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) {
        if (s->ssid) {
            channel_subsys.max_ssid = MAX_SSID;
        }
        if (s->cssid != channel_subsys.default_cssid) {
            channel_subsys.max_cssid = MAX_CSSID;
        }
    }
    return 0;
}

IndAddr *get_indicator(hwaddr ind_addr, int len)
{
    IndAddr *indicator;

    QTAILQ_FOREACH(indicator, &channel_subsys.indicator_addresses, sibling) {
        if (indicator->addr == ind_addr) {
            indicator->refcnt++;
            return indicator;
        }
    }
    indicator = g_new0(IndAddr, 1);
    indicator->addr = ind_addr;
    indicator->len = len;
    indicator->refcnt = 1;
    QTAILQ_INSERT_TAIL(&channel_subsys.indicator_addresses,
                       indicator, sibling);
    return indicator;
}

static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr,
                               bool do_map)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map);
}

void release_indicator(AdapterInfo *adapter, IndAddr *indicator)
{
    assert(indicator->refcnt > 0);
    indicator->refcnt--;
    if (indicator->refcnt > 0) {
        return;
    }
    QTAILQ_REMOVE(&channel_subsys.indicator_addresses, indicator, sibling);
    if (indicator->map) {
        s390_io_adapter_map(adapter, indicator->map, false);
    }
    g_free(indicator);
}

int map_indicator(AdapterInfo *adapter, IndAddr *indicator)
{
    int ret;

    if (indicator->map) {
        return 0; /* already mapped is not an error */
    }
    indicator->map = indicator->addr;
    ret = s390_io_adapter_map(adapter, indicator->map, true);
    if ((ret != 0) && (ret != -ENOSYS)) {
        goto out_err;
    }
    return 0;

out_err:
    indicator->map = 0;
    return ret;
}

int css_create_css_image(uint8_t cssid, bool default_image)
{
    trace_css_new_image(cssid, default_image ? "(default)" : "");
    /* 255 is reserved */
    if (cssid == 255) {
        return -EINVAL;
    }
    if (channel_subsys.css[cssid]) {
        return -EBUSY;
    }
    channel_subsys.css[cssid] = g_malloc0(sizeof(CssImage));
    if (default_image) {
        channel_subsys.default_cssid = cssid;
    }
    return 0;
}

uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc)
{
    if (type >= CSS_IO_ADAPTER_TYPE_NUMS || isc > MAX_ISC ||
        !channel_subsys.io_adapters[type][isc]) {
        return -1;
    }

    return channel_subsys.io_adapters[type][isc]->id;
}
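
/*
 * Adapter id layout (sketch derived from css_register_io_adapters()
 * below): ids are allocated as (type << 3) | isc, keeping the ISC in
 * the low three bits. With a hypothetical type value of 1 and isc 5:
 *
 *     id = (1 << 3) | 5;    // == 0x0d, isc recoverable as id & 0x7
 *
 * which is also why exactly one adapter exists per (type, isc) pair.
 */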
/**
 * css_register_io_adapters: Register I/O adapters per ISC during init
 *
 * @swap: an indication if byte swap is needed.
 * @maskable: an indication if the adapter is subject to the mask operation.
 * @flags: further characteristics of the adapter.
 *         e.g. suppressible, an indication if the adapter is subject to AIS.
 * @errp: location to store error information.
 */
void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
                              uint8_t flags, Error **errp)
{
    uint32_t id;
    int ret, isc;
    IoAdapter *adapter;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    /*
     * Disallow multiple registrations for the same device type.
     * Report an error if registering for an already registered type.
     */
    if (channel_subsys.io_adapters[type][0]) {
        error_setg(errp, "Adapters for type %d already registered", type);
        return;
    }

    for (isc = 0; isc <= MAX_ISC; isc++) {
        id = (type << 3) | isc;
        ret = fsc->register_io_adapter(fs, id, isc, swap, maskable, flags);
        if (ret == 0) {
            adapter = g_new0(IoAdapter, 1);
            adapter->id = id;
            adapter->isc = isc;
            adapter->type = type;
            adapter->flags = flags;
            channel_subsys.io_adapters[type][isc] = adapter;
        } else {
            error_setg_errno(errp, -ret, "Unexpected error %d when "
                             "registering adapter %d", ret, id);
            break;
        }
    }

    /*
     * No need to free registered adapters in kvm: kvm will clean up
     * when the machine goes away.
     */
    if (ret) {
        for (isc--; isc >= 0; isc--) {
            g_free(channel_subsys.io_adapters[type][isc]);
            channel_subsys.io_adapters[type][isc] = NULL;
        }
    }
}

static void css_clear_io_interrupt(uint16_t subchannel_id,
                                   uint16_t subchannel_nr)
{
    Error *err = NULL;
    static bool no_clear_irq;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
    int r;

    if (unlikely(no_clear_irq)) {
        return;
    }
    r = fsc->clear_io_irq(fs, subchannel_id, subchannel_nr);
    switch (r) {
    case 0:
        break;
    case -ENOSYS:
        no_clear_irq = true;
        /*
         * Ignore unavailability, as the user can't do anything
         * about it anyway.
         */
        break;
    default:
        error_setg_errno(&err, -r, "unexpected error condition");
        error_propagate(&error_abort, err);
    }
}

static inline uint16_t css_do_build_subchannel_id(uint8_t cssid, uint8_t ssid)
{
    if (channel_subsys.max_cssid > 0) {
        return (cssid << 8) | (1 << 3) | (ssid << 1) | 1;
    }
    return (ssid << 1) | 1;
}

uint16_t css_build_subchannel_id(SubchDev *sch)
{
    return css_do_build_subchannel_id(sch->cssid, sch->ssid);
}
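
/*
 * Worked example for the id layout above: with mcss-e active
 * (max_cssid > 0) the subchannel id carries cssid in bits 15:8, a one
 * bit at bit 3, ssid in bits 2:1 and the always-one bit 0, so cssid
 * 0xfe, ssid 1 gives
 *
 *     (0xfe << 8) | (1 << 3) | (1 << 1) | 1 == 0xfe0b
 *
 * while without mcss-e only ssid is encoded: ssid 1 -> 0x0003.
 */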
void css_inject_io_interrupt(SubchDev *sch)
{
    uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;

    trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
                           sch->curr_status.pmcw.intparm, isc, "");
    s390_io_interrupt(css_build_subchannel_id(sch),
                      sch->schid,
                      sch->curr_status.pmcw.intparm,
                      isc << 27);
}

void css_conditional_io_interrupt(SubchDev *sch)
{
    /*
     * If the subchannel is not currently status pending, make it pending
     * with alert status.
     */
    if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) {
        uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;

        trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
                               sch->curr_status.pmcw.intparm, isc,
                               "(unsolicited)");
        sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        sch->curr_status.scsw.ctrl |=
            SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        /* Inject an I/O interrupt. */
        s390_io_interrupt(css_build_subchannel_id(sch),
                          sch->schid,
                          sch->curr_status.pmcw.intparm,
                          isc << 27);
    }
}

int css_do_sic(CPUS390XState *env, uint8_t isc, uint16_t mode)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
    int r;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        r = -PGM_PRIVILEGED;
        goto out;
    }

    trace_css_do_sic(mode, isc);
    switch (mode) {
    case SIC_IRQ_MODE_ALL:
    case SIC_IRQ_MODE_SINGLE:
        break;
    default:
        r = -PGM_OPERAND;
        goto out;
    }

    r = fsc->modify_ais_mode(fs, isc, mode) ? -PGM_OPERATION : 0;
out:
    return r;
}

void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
    uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;
    IoAdapter *adapter = channel_subsys.io_adapters[type][isc];

    if (!adapter) {
        return;
    }

    trace_css_adapter_interrupt(isc);
    if (fs->ais_supported) {
        if (fsc->inject_airq(fs, type, isc, adapter->flags)) {
            error_report("Failed to inject airq with AIS supported");
            exit(1);
        }
    } else {
        s390_io_interrupt(0, 0, 0, io_int_word);
    }
}
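
/*
 * Layout of the interrupt word built above (sketch): isc << 27 puts
 * the 3-bit ISC at bits 29:27 and IO_INT_WORD_AI marks the interrupt
 * as adapter-initiated, e.g. for isc 3:
 *
 *     io_int_word = (3 << 27) | IO_INT_WORD_AI;
 *
 * With AIS support the FLIC's inject_airq() callback delivers it so
 * suppression state is honored; otherwise we fall back to a plain I/O
 * interrupt that carries only the interrupt word.
 */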
static void sch_handle_clear_func(SubchDev *sch)
{
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int path;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    /* Reset values prior to 'issuing the clear signal'. */
    p->lpum = 0;
    p->pom = 0xff;
    s->flags &= ~SCSW_FLAGS_MASK_PNO;

    /* We always 'attempt to issue the clear signal', and we always succeed. */
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    s->ctrl &= ~SCSW_ACTL_CLEAR_PEND;
    s->ctrl |= SCSW_STCTL_STATUS_PEND;

    s->dstat = 0;
    s->cstat = 0;
    p->lpum = path;
}

static void sch_handle_halt_func(SubchDev *sch)
{
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    hwaddr curr_ccw = sch->channel_prog;
    int path;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    /* We always 'attempt to issue the halt signal', and we always succeed. */
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    s->ctrl &= ~SCSW_ACTL_HALT_PEND;
    s->ctrl |= SCSW_STCTL_STATUS_PEND;

    if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
        !((s->ctrl & SCSW_ACTL_START_PEND) ||
          (s->ctrl & SCSW_ACTL_SUSP))) {
        s->dstat = SCSW_DSTAT_DEVICE_END;
    }
    if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
        (s->ctrl & SCSW_ACTL_SUSP)) {
        s->cpa = curr_ccw + 8;
    }
    s->cstat = 0;
    p->lpum = path;
}

static void copy_sense_id_to_guest(SenseId *dest, SenseId *src)
{
    int i;

    dest->reserved = src->reserved;
    dest->cu_type = cpu_to_be16(src->cu_type);
    dest->cu_model = src->cu_model;
    dest->dev_type = cpu_to_be16(src->dev_type);
    dest->dev_model = src->dev_model;
    dest->unused = src->unused;
    for (i = 0; i < ARRAY_SIZE(dest->ciw); i++) {
        dest->ciw[i].type = src->ciw[i].type;
        dest->ciw[i].command = src->ciw[i].command;
        dest->ciw[i].count = cpu_to_be16(src->ciw[i].count);
    }
}

static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1)
{
    CCW0 tmp0;
    CCW1 tmp1;
    CCW1 ret;

    if (fmt1) {
        cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1));
        ret.cmd_code = tmp1.cmd_code;
        ret.flags = tmp1.flags;
        ret.count = be16_to_cpu(tmp1.count);
        ret.cda = be32_to_cpu(tmp1.cda);
    } else {
        cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0));
        if ((tmp0.cmd_code & 0x0f) == CCW_CMD_TIC) {
            ret.cmd_code = CCW_CMD_TIC;
            ret.flags = 0;
            ret.count = 0;
        } else {
            ret.cmd_code = tmp0.cmd_code;
            ret.flags = tmp0.flags;
            ret.count = be16_to_cpu(tmp0.count);
        }
        ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
    }
    return ret;
}
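
/*
 * Worked example for the format-0 address reassembly above: a
 * format-0 CCW splits its 24-bit data address into cda0 (bits 23:16)
 * and cda1 (bits 15:0, big-endian), so
 *
 *     ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
 *
 * turns cda0 == 0x12, cda1 == 0x3456 (BE) into 0x123456. Format-1
 * CCWs carry one 31-bit cda word, hence the single be32_to_cpu() in
 * the fmt1 branch.
 */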
static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
                             bool suspend_allowed)
{
    int ret;
    bool check_len;
    int len;
    CCW1 ccw;

    if (!ccw_addr) {
        return -EIO;
    }

    /* Translate everything to format-1 ccws - the information is the same. */
    ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1);

    /* Check for invalid command codes. */
    if ((ccw.cmd_code & 0x0f) == 0) {
        return -EINVAL;
    }
    if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) &&
        ((ccw.cmd_code & 0xf0) != 0)) {
        return -EINVAL;
    }
    if (!sch->ccw_fmt_1 && (ccw.count == 0) &&
        (ccw.cmd_code != CCW_CMD_TIC)) {
        return -EINVAL;
    }

    /* We don't support MIDA. */
    if (ccw.flags & CCW_FLAG_MIDA) {
        return -EINVAL;
    }

    if (ccw.flags & CCW_FLAG_SUSPEND) {
        return suspend_allowed ? -EINPROGRESS : -EINVAL;
    }

    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (!ccw.cda) {
        if (sch->ccw_no_data_cnt == 255) {
            return -EINVAL;
        }
        sch->ccw_no_data_cnt++;
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_NOOP:
        /* Nothing to do. */
        ret = 0;
        break;
    case CCW_CMD_BASIC_SENSE:
        if (check_len) {
            if (ccw.count != sizeof(sch->sense_data)) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, sizeof(sch->sense_data));
        cpu_physical_memory_write(ccw.cda, sch->sense_data, len);
        sch->curr_status.scsw.count = ccw.count - len;
        memset(sch->sense_data, 0, sizeof(sch->sense_data));
        ret = 0;
        break;
    case CCW_CMD_SENSE_ID:
    {
        SenseId sense_id;

        copy_sense_id_to_guest(&sense_id, &sch->id);
        /* Sense ID information is device specific. */
        if (check_len) {
            if (ccw.count != sizeof(sense_id)) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, sizeof(sense_id));
        /*
         * Only indicate 0xff in the first sense byte if we actually
         * have enough place to store at least bytes 0-3.
         */
        if (len >= 4) {
            sense_id.reserved = 0xff;
        } else {
            sense_id.reserved = 0;
        }
        cpu_physical_memory_write(ccw.cda, &sense_id, len);
        sch->curr_status.scsw.count = ccw.count - len;
        ret = 0;
        break;
    }
    case CCW_CMD_TIC:
        if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) {
            ret = -EINVAL;
            break;
        }
        if (ccw.flags & (CCW_FLAG_CC | CCW_FLAG_DC)) {
            ret = -EINVAL;
            break;
        }
        sch->channel_prog = ccw.cda;
        ret = -EAGAIN;
        break;
    default:
        if (sch->ccw_cb) {
            /* Handle device specific commands. */
            ret = sch->ccw_cb(sch, ccw);
        } else {
            ret = -ENOSYS;
        }
        break;
    }
    sch->last_cmd = ccw;
    sch->last_cmd_valid = true;
    if (ret == 0) {
        if (ccw.flags & CCW_FLAG_CC) {
            sch->channel_prog += 8;
            ret = -EAGAIN;
        }
    }

    return ret;
}
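
/*
 * Channel-program shape (illustrative sketch; a real guest writes
 * big-endian CCWs into its own memory, this is just the host-side
 * struct for orientation): CCW_FLAG_CC on a successful command makes
 * the interpreter advance channel_prog by 8 and loop via -EAGAIN,
 * while CCW_CMD_TIC branches by loading channel_prog from cda. A
 * two-command format-1 program could look like:
 *
 *     CCW1 prog[2] = {
 *         { CCW_CMD_SENSE_ID, CCW_FLAG_CC, sizeof(SenseId), buf_addr },
 *         { CCW_CMD_NOOP,     0,           0,               0        },
 *     };
 *
 * buf_addr is a hypothetical guest address; execution stops at the
 * first CCW that has neither CC nor DC set.
 */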
static void sch_handle_start_func_virtual(SubchDev *sch, ORB *orb)
{
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int path;
    int ret;
    bool suspend_allowed;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    if (!(s->ctrl & SCSW_ACTL_SUSP)) {
        /* Start Function triggered via ssch, i.e. we have an ORB */
        s->cstat = 0;
        s->dstat = 0;
        /* Look at the orb and try to execute the channel program. */
        assert(orb != NULL); /* resume does not pass an orb */
        p->intparm = orb->intparm;
        if (!(orb->lpm & path)) {
            /* Generate a deferred cc 3 condition. */
            s->flags |= SCSW_FLAGS_MASK_CC;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            return;
        }
        sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT);
        s->flags |= (sch->ccw_fmt_1) ? SCSW_FLAGS_MASK_FMT : 0;
        sch->ccw_no_data_cnt = 0;
        suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND);
    } else {
        /*
         * Start Function resumed via rsch, i.e. we don't have an
         * ORB
         */
        s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
        /* The channel program had been suspended before. */
        suspend_allowed = true;
    }
    sch->last_cmd_valid = false;
    do {
        ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed);
        switch (ret) {
        case -EAGAIN:
            /* ccw chain, continue processing */
            break;
        case 0:
            /* success */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_STATUS_PEND;
            s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
            s->cpa = sch->channel_prog + 8;
            break;
        case -EIO:
            /* I/O errors, status depends on specific devices */
            break;
        case -ENOSYS:
            /* unsupported command, generate unit check (command reject) */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->dstat = SCSW_DSTAT_UNIT_CHECK;
            /* Set sense bit 0 in ecw0. */
            sch->sense_data[0] = 0x80;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            s->cpa = sch->channel_prog + 8;
            break;
        case -EFAULT:
            /* memory problem, generate channel data check */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->cstat = SCSW_CSTAT_DATA_CHECK;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            s->cpa = sch->channel_prog + 8;
            break;
        case -EBUSY:
            /* subchannel busy, generate deferred cc 1 */
            s->flags &= ~SCSW_FLAGS_MASK_CC;
            s->flags |= (1 << 8);
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            break;
        case -EINPROGRESS:
            /* channel program has been suspended */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->ctrl |= SCSW_ACTL_SUSP;
            break;
        default:
            /* error, generate channel program check */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->cstat = SCSW_CSTAT_PROG_CHECK;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            s->cpa = sch->channel_prog + 8;
            break;
        }
    } while (ret == -EAGAIN);
}
static int sch_handle_start_func_passthrough(SubchDev *sch, ORB *orb)
{
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int ret;

    if (!(s->ctrl & SCSW_ACTL_SUSP)) {
        assert(orb != NULL);
        p->intparm = orb->intparm;
    }

    /*
     * Only support prefetch enable mode.
     * Only support 64bit addressing idal.
     */
    if (!(orb->ctrl0 & ORB_CTRL0_MASK_PFCH) ||
        !(orb->ctrl0 & ORB_CTRL0_MASK_C64)) {
        return -EINVAL;
    }

    ret = s390_ccw_cmd_request(orb, s, sch->driver_data);
    switch (ret) {
    /* Currently we don't update control block and just return the cc code. */
    case 0:
        break;
    case -EBUSY:
        break;
    case -ENODEV:
        break;
    case -EACCES:
        /* Let's reflect an inaccessible host device by cc 3. */
        ret = -ENODEV;
        break;
    default:
        /*
         * All other return codes will trigger a program check,
         * or set cc to 1.
         */
        break;
    }

    return ret;
}
/*
 * On real machines, this would run asynchronously to the main vcpus.
 * We might want to make some parts of the ssch handling (interpreting
 * read/writes) asynchronous later on if we start supporting more than
 * our current very simple devices.
 */
int do_subchannel_work_virtual(SubchDev *sch, ORB *orb)
{
    SCSW *s = &sch->curr_status.scsw;

    if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
        sch_handle_clear_func(sch);
    } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
        sch_handle_halt_func(sch);
    } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
        /* Triggered by both ssch and rsch. */
        sch_handle_start_func_virtual(sch, orb);
    } else {
        /* Cannot happen. */
        return 0;
    }
    css_inject_io_interrupt(sch);
    return 0;
}

int do_subchannel_work_passthrough(SubchDev *sch, ORB *orb)
{
    int ret;
    SCSW *s = &sch->curr_status.scsw;

    if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
        /* TODO: Clear handling */
        sch_handle_clear_func(sch);
        ret = 0;
    } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
        /* TODO: Halt handling */
        sch_handle_halt_func(sch);
        ret = 0;
    } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
        ret = sch_handle_start_func_passthrough(sch, orb);
    } else {
        /* Cannot happen. */
        return -ENODEV;
    }

    return ret;
}

static int do_subchannel_work(SubchDev *sch, ORB *orb)
{
    if (sch->do_subchannel_work) {
        return sch->do_subchannel_work(sch, orb);
    } else {
        return -EINVAL;
    }
}
static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
{
    int i;

    dest->intparm = cpu_to_be32(src->intparm);
    dest->flags = cpu_to_be16(src->flags);
    dest->devno = cpu_to_be16(src->devno);
    dest->lpm = src->lpm;
    dest->pnom = src->pnom;
    dest->lpum = src->lpum;
    dest->pim = src->pim;
    dest->mbi = cpu_to_be16(src->mbi);
    dest->pom = src->pom;
    dest->pam = src->pam;
    for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
        dest->chpid[i] = src->chpid[i];
    }
    dest->chars = cpu_to_be32(src->chars);
}

void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
{
    dest->flags = cpu_to_be16(src->flags);
    dest->ctrl = cpu_to_be16(src->ctrl);
    dest->cpa = cpu_to_be32(src->cpa);
    dest->dstat = src->dstat;
    dest->cstat = src->cstat;
    dest->count = cpu_to_be16(src->count);
}

static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src)
{
    int i;

    copy_pmcw_to_guest(&dest->pmcw, &src->pmcw);
    copy_scsw_to_guest(&dest->scsw, &src->scsw);
    dest->mba = cpu_to_be64(src->mba);
    for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
        dest->mda[i] = src->mda[i];
    }
}

int css_do_stsch(SubchDev *sch, SCHIB *schib)
{
    /* Use current status. */
    copy_schib_to_guest(schib, &sch->curr_status);
    return 0;
}

static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src)
{
    int i;

    dest->intparm = be32_to_cpu(src->intparm);
    dest->flags = be16_to_cpu(src->flags);
    dest->devno = be16_to_cpu(src->devno);
    dest->lpm = src->lpm;
    dest->pnom = src->pnom;
    dest->lpum = src->lpum;
    dest->pim = src->pim;
    dest->mbi = be16_to_cpu(src->mbi);
    dest->pom = src->pom;
    dest->pam = src->pam;
    for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
        dest->chpid[i] = src->chpid[i];
    }
    dest->chars = be32_to_cpu(src->chars);
}

static void copy_scsw_from_guest(SCSW *dest, const SCSW *src)
{
    dest->flags = be16_to_cpu(src->flags);
    dest->ctrl = be16_to_cpu(src->ctrl);
    dest->cpa = be32_to_cpu(src->cpa);
    dest->dstat = src->dstat;
    dest->cstat = src->cstat;
    dest->count = be16_to_cpu(src->count);
}

static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
{
    int i;

    copy_pmcw_from_guest(&dest->pmcw, &src->pmcw);
    copy_scsw_from_guest(&dest->scsw, &src->scsw);
    dest->mba = be64_to_cpu(src->mba);
    for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
        dest->mda[i] = src->mda[i];
    }
}
int css_do_msch(SubchDev *sch, const SCHIB *orig_schib)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    uint16_t oldflags;
    int ret;
    SCHIB schib;

    if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) {
        ret = 0;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl &
        (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    copy_schib_from_guest(&schib, orig_schib);
    /* Only update the program-modifiable fields. */
    p->intparm = schib.pmcw.intparm;
    oldflags = p->flags;
    p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
                  PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
                  PMCW_FLAGS_MASK_MP);
    p->flags |= schib.pmcw.flags &
            (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
             PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
             PMCW_FLAGS_MASK_MP);
    p->lpm = schib.pmcw.lpm;
    p->mbi = schib.pmcw.mbi;
    p->pom = schib.pmcw.pom;
    p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
    p->chars |= schib.pmcw.chars &
            (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
    sch->curr_status.mba = schib.mba;

    /* Has the channel been disabled? */
    if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0
        && (p->flags & PMCW_FLAGS_MASK_ENA) == 0) {
        sch->disable_cb(sch);
    }

    ret = 0;

out:
    return ret;
}
int css_do_xsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        ret = -ENODEV;
        goto out;
    }

    if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) ||
        ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
        (!(s->ctrl &
           (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) ||
        (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & SCSW_CTRL_MASK_STCTL) {
        ret = -EBUSY;
        goto out;
    }

    /* Cancel the current operation. */
    s->ctrl &= ~(SCSW_FCTL_START_FUNC |
                 SCSW_ACTL_RESUME_PEND |
                 SCSW_ACTL_START_PEND |
                 SCSW_ACTL_SUSP);
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    s->dstat = 0;
    s->cstat = 0;
    ret = 0;

out:
    return ret;
}

int css_do_csch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        ret = -ENODEV;
        goto out;
    }

    /* Trigger the clear function. */
    s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
    s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND;

    do_subchannel_work(sch, NULL);
    ret = 0;

out:
    return ret;
}

int css_do_hsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        ret = -ENODEV;
        goto out;
    }

    if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) ||
        (s->ctrl & (SCSW_STCTL_PRIMARY |
                    SCSW_STCTL_SECONDARY |
                    SCSW_STCTL_ALERT))) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    /* Trigger the halt function. */
    s->ctrl |= SCSW_FCTL_HALT_FUNC;
    s->ctrl &= ~SCSW_FCTL_START_FUNC;
    if (((s->ctrl & SCSW_CTRL_MASK_ACTL) ==
         (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) &&
        ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) {
        s->ctrl &= ~SCSW_STCTL_STATUS_PEND;
    }
    s->ctrl |= SCSW_ACTL_HALT_PEND;

    do_subchannel_work(sch, NULL);
    ret = 0;

out:
    return ret;
}
static void css_update_chnmon(SubchDev *sch)
{
    if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) {
        /* Not active. */
        return;
    }
    /* The counter is conveniently located at the beginning of the struct. */
    if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) {
        /* Format 1, per-subchannel area. */
        uint32_t count;

        count = address_space_ldl(&address_space_memory,
                                  sch->curr_status.mba,
                                  MEMTXATTRS_UNSPECIFIED,
                                  NULL);
        count++;
        address_space_stl(&address_space_memory, sch->curr_status.mba, count,
                          MEMTXATTRS_UNSPECIFIED, NULL);
    } else {
        /* Format 0, global area. */
        uint32_t offset;
        uint16_t count;

        offset = sch->curr_status.pmcw.mbi << 5;
        count = address_space_lduw(&address_space_memory,
                                   channel_subsys.chnmon_area + offset,
                                   MEMTXATTRS_UNSPECIFIED,
                                   NULL);
        count++;
        address_space_stw(&address_space_memory,
                          channel_subsys.chnmon_area + offset, count,
                          MEMTXATTRS_UNSPECIFIED, NULL);
    }
}
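
/*
 * Worked example for the format-0 offset above: the PMCW's mbi
 * indexes 32-byte measurement blocks inside the global area installed
 * by css_do_schm(), hence offset = mbi << 5; mbi == 2 touches the
 * halfword counter at chnmon_area + 64. Format 1 instead uses a
 * per-subchannel block addressed by mba, with the 32-bit counter at
 * offset 0 (the "conveniently located" comment above).
 */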
int css_do_ssch(SubchDev *sch, ORB *orb)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        ret = -ENODEV;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & (SCSW_FCTL_START_FUNC |
                   SCSW_FCTL_HALT_FUNC |
                   SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    /* If monitoring is active, update counter. */
    if (channel_subsys.chnmon_active) {
        css_update_chnmon(sch);
    }
    sch->channel_prog = orb->cpa;
    /* Trigger the start function. */
    s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
    s->flags &= ~SCSW_FLAGS_MASK_PNO;

    ret = do_subchannel_work(sch, orb);

out:
    return ret;
}

static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw,
                              int *irb_len)
{
    int i;
    uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
    uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL;

    copy_scsw_to_guest(&dest->scsw, &src->scsw);

    for (i = 0; i < ARRAY_SIZE(dest->esw); i++) {
        dest->esw[i] = cpu_to_be32(src->esw[i]);
    }
    for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
        dest->ecw[i] = cpu_to_be32(src->ecw[i]);
    }
    *irb_len = sizeof(*dest) - sizeof(dest->emw);

    /* extended measurements enabled? */
    if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
        !(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
        !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) {
        return;
    }
    /* extended measurements pending? */
    if (!(stctl & SCSW_STCTL_STATUS_PEND)) {
        return;
    }
    if ((stctl & SCSW_STCTL_PRIMARY) ||
        (stctl == SCSW_STCTL_SECONDARY) ||
        ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) {
        for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
            dest->emw[i] = cpu_to_be32(src->emw[i]);
        }
    }
    *irb_len = sizeof(*dest);
}
int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    uint16_t stctl;
    IRB irb;

    if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        return 3;
    }

    stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;

    /* Prepare the irb for the guest. */
    memset(&irb, 0, sizeof(IRB));

    /* Copy scsw from current status. */
    memcpy(&irb.scsw, s, sizeof(SCSW));
    if (stctl & SCSW_STCTL_STATUS_PEND) {
        if (s->cstat & (SCSW_CSTAT_DATA_CHECK |
                        SCSW_CSTAT_CHN_CTRL_CHK |
                        SCSW_CSTAT_INTF_CTRL_CHK)) {
            irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF;
            irb.esw[0] = 0x04804000;
        } else {
            irb.esw[0] = 0x00800000;
        }
        /* If a unit check is pending, copy sense data. */
        if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) &&
            (p->chars & PMCW_CHARS_MASK_CSENSE)) {
            int i;

            irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
            /* Attention: sense_data is already BE! */
            memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data));
            for (i = 0; i < ARRAY_SIZE(irb.ecw); i++) {
                irb.ecw[i] = be32_to_cpu(irb.ecw[i]);
            }
            irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8);
        }
    }
    /* Store the irb to the guest. */
    copy_irb_to_guest(target_irb, &irb, p, irb_len);

    return ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
}

void css_do_tsch_update_subch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    uint16_t stctl;
    uint16_t fctl;
    uint16_t actl;

    stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
    fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
    actl = s->ctrl & SCSW_CTRL_MASK_ACTL;

    /* Clear conditions on subchannel, if applicable. */
    if (stctl & SCSW_STCTL_STATUS_PEND) {
        s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
        if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) ||
            ((fctl & SCSW_FCTL_HALT_FUNC) &&
             (actl & SCSW_ACTL_SUSP))) {
            s->ctrl &= ~SCSW_CTRL_MASK_FCTL;
        }
        if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) {
            s->flags &= ~SCSW_FLAGS_MASK_PNO;
            s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
                         SCSW_ACTL_START_PEND |
                         SCSW_ACTL_HALT_PEND |
                         SCSW_ACTL_CLEAR_PEND |
                         SCSW_ACTL_SUSP);
        } else {
            if ((actl & SCSW_ACTL_SUSP) &&
                (fctl & SCSW_FCTL_START_FUNC)) {
                s->flags &= ~SCSW_FLAGS_MASK_PNO;
                if (fctl & SCSW_FCTL_HALT_FUNC) {
                    s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
                                 SCSW_ACTL_START_PEND |
                                 SCSW_ACTL_HALT_PEND |
                                 SCSW_ACTL_CLEAR_PEND |
                                 SCSW_ACTL_SUSP);
                } else {
                    s->ctrl &= ~SCSW_ACTL_RESUME_PEND;
                }
            }
        }
        /* Clear pending sense data. */
        if (p->chars & PMCW_CHARS_MASK_CSENSE) {
            memset(sch->sense_data, 0, sizeof(sch->sense_data));
        }
    }
}
static void copy_crw_to_guest(CRW *dest, const CRW *src)
{
    dest->flags = cpu_to_be16(src->flags);
    dest->rsid = cpu_to_be16(src->rsid);
}

int css_do_stcrw(CRW *crw)
{
    CrwContainer *crw_cont;
    int ret;

    crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws);
    if (crw_cont) {
        QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
        copy_crw_to_guest(crw, &crw_cont->crw);
        g_free(crw_cont);
        ret = 0;
    } else {
        /* List was empty, turn crw machine checks on again. */
        memset(crw, 0, sizeof(*crw));
        channel_subsys.do_crw_mchk = true;
        ret = 1;
    }

    return ret;
}

static void copy_crw_from_guest(CRW *dest, const CRW *src)
{
    dest->flags = be16_to_cpu(src->flags);
    dest->rsid = be16_to_cpu(src->rsid);
}

void css_undo_stcrw(CRW *crw)
{
    CrwContainer *crw_cont;

    crw_cont = g_try_malloc0(sizeof(CrwContainer));
    if (!crw_cont) {
        channel_subsys.crws_lost = true;
        return;
    }
    copy_crw_from_guest(&crw_cont->crw, crw);

    QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling);
}

int css_do_tpi(IOIntCode *int_code, int lowcore)
{
    /* No pending interrupts for !KVM. */
    return 0;
}
int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
                         int rfmt, void *buf)
{
    int i, desc_size;
    uint32_t words[8];
    uint32_t chpid_type_word;
    CssImage *css;

    if (!m && !cssid) {
        css = channel_subsys.css[channel_subsys.default_cssid];
    } else {
        css = channel_subsys.css[cssid];
    }
    if (!css) {
        return 0;
    }
    desc_size = 0;
    for (i = f_chpid; i <= l_chpid; i++) {
        if (css->chpids[i].in_use) {
            chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i;
            if (rfmt == 0) {
                words[0] = cpu_to_be32(chpid_type_word);
                words[1] = 0;
                memcpy(buf + desc_size, words, 8);
                desc_size += 8;
            } else if (rfmt == 1) {
                words[0] = cpu_to_be32(chpid_type_word);
                words[1] = 0;
                words[2] = 0;
                words[3] = 0;
                words[4] = 0;
                words[5] = 0;
                words[6] = 0;
                words[7] = 0;
                memcpy(buf + desc_size, words, 32);
                desc_size += 32;
            }
        }
    }
    return desc_size;
}

void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
{
    /* dct is currently ignored (not really meaningful for our devices) */
    /* TODO: Don't ignore mbk. */
    if (update && !channel_subsys.chnmon_active) {
        /* Enable measuring. */
        channel_subsys.chnmon_area = mbo;
        channel_subsys.chnmon_active = true;
    }
    if (!update && channel_subsys.chnmon_active) {
        /* Disable measuring. */
        channel_subsys.chnmon_area = 0;
        channel_subsys.chnmon_active = false;
    }
}
int css_do_rsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        ret = -ENODEV;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
        (s->ctrl & SCSW_ACTL_RESUME_PEND) ||
        (!(s->ctrl & SCSW_ACTL_SUSP))) {
        ret = -EINVAL;
        goto out;
    }

    /* If monitoring is active, update counter. */
    if (channel_subsys.chnmon_active) {
        css_update_chnmon(sch);
    }

    s->ctrl |= SCSW_ACTL_RESUME_PEND;
    do_subchannel_work(sch, NULL);
    ret = 0;

out:
    return ret;
}

int css_do_rchp(uint8_t cssid, uint8_t chpid)
{
    uint8_t real_cssid;

    if (cssid > channel_subsys.max_cssid) {
        return -EINVAL;
    }
    if (channel_subsys.max_cssid == 0) {
        real_cssid = channel_subsys.default_cssid;
    } else {
        real_cssid = cssid;
    }
    if (!channel_subsys.css[real_cssid]) {
        return -EINVAL;
    }

    if (!channel_subsys.css[real_cssid]->chpids[chpid].in_use) {
        return -ENODEV;
    }

    if (!channel_subsys.css[real_cssid]->chpids[chpid].is_virtual) {
        fprintf(stderr,
                "rchp unsupported for non-virtual chpid %x.%02x!\n",
                real_cssid, chpid);
        return -ENODEV;
    }

    /* We don't really use a channel path, so we're done here. */
    css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT,
                  channel_subsys.max_cssid > 0 ? 1 : 0, chpid);
    if (channel_subsys.max_cssid > 0) {
        css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 0, real_cssid << 8);
    }
    return 0;
}
bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid)
{
    SubchSet *set;
    uint8_t real_cssid;

    real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
    if (ssid > MAX_SSID ||
        !channel_subsys.css[real_cssid] ||
        !channel_subsys.css[real_cssid]->sch_set[ssid]) {
        return true;
    }
    set = channel_subsys.css[real_cssid]->sch_set[ssid];
    return schid > find_last_bit(set->schids_used,
                                 (MAX_SCHID + 1) / sizeof(unsigned long));
}

unsigned int css_find_free_chpid(uint8_t cssid)
{
    CssImage *css = channel_subsys.css[cssid];
    unsigned int chpid;

    if (!css) {
        return MAX_CHPID + 1;
    }

    for (chpid = 0; chpid <= MAX_CHPID; chpid++) {
        /* skip reserved chpid */
        if (chpid == VIRTIO_CCW_CHPID) {
            continue;
        }
        if (!css->chpids[chpid].in_use) {
            return chpid;
        }
    }
    return MAX_CHPID + 1;
}

static int css_add_chpid(uint8_t cssid, uint8_t chpid, uint8_t type,
                         bool is_virt)
{
    CssImage *css;

    trace_css_chpid_add(cssid, chpid, type);
    css = channel_subsys.css[cssid];
    if (!css) {
        return -EINVAL;
    }
    if (css->chpids[chpid].in_use) {
        return -EEXIST;
    }
    css->chpids[chpid].in_use = 1;
    css->chpids[chpid].type = type;
    css->chpids[chpid].is_virtual = is_virt;

    css_generate_chp_crws(cssid, chpid);

    return 0;
}

void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type)
{
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int i;
    CssImage *css = channel_subsys.css[sch->cssid];

    assert(css != NULL);
    memset(p, 0, sizeof(PMCW));
    p->flags |= PMCW_FLAGS_MASK_DNV;
    p->devno = sch->devno;
    /* single path */
    p->pim = 0x80;
    p->pom = 0xff;
    p->pam = 0x80;
    p->chpid[0] = chpid;
    if (!css->chpids[chpid].in_use) {
        css_add_chpid(sch->cssid, chpid, type, true);
    }

    memset(s, 0, sizeof(SCSW));
    sch->curr_status.mba = 0;
    for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
        sch->curr_status.mda[i] = 0;
    }
}
SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid)
{
    uint8_t real_cssid;

    real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;

    if (!channel_subsys.css[real_cssid]) {
        return NULL;
    }

    if (!channel_subsys.css[real_cssid]->sch_set[ssid]) {
        return NULL;
    }

    return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid];
}

/**
 * Return free device number in subchannel set.
 *
 * Return index of the first free device number in the subchannel set
 * identified by @p cssid and @p ssid, beginning the search at @p
 * start and wrapping around at MAX_DEVNO. Return a value exceeding
 * MAX_DEVNO if there are no free device numbers in the subchannel
 * set.
 */
static uint32_t css_find_free_devno(uint8_t cssid, uint8_t ssid,
                                    uint16_t start)
{
    uint32_t round;

    for (round = 0; round <= MAX_DEVNO; round++) {
        uint16_t devno = (start + round) % MAX_DEVNO;

        if (!css_devno_used(cssid, ssid, devno)) {
            return devno;
        }
    }
    return MAX_DEVNO + 1;
}

/**
 * Return first free subchannel (id) in subchannel set.
 *
 * Return index of the first free subchannel in the subchannel set
 * identified by @p cssid and @p ssid, if there is any. Return a value
 * exceeding MAX_SCHID if there are no free subchannels in the
 * subchannel set.
 */
static uint32_t css_find_free_subch(uint8_t cssid, uint8_t ssid)
{
    uint32_t schid;

    for (schid = 0; schid <= MAX_SCHID; schid++) {
        if (!css_find_subch(1, cssid, ssid, schid)) {
            return schid;
        }
    }
    return MAX_SCHID + 1;
}

/**
 * Return first free subchannel (id) in subchannel set for a device number
 *
 * Verify the device number @p devno is not used yet in the subchannel
 * set identified by @p cssid and @p ssid. Set @p schid to the index
 * of the first free subchannel in the subchannel set, if there is
 * any. Return true if everything succeeded and false otherwise.
 */
static bool css_find_free_subch_for_devno(uint8_t cssid, uint8_t ssid,
                                          uint16_t devno, uint16_t *schid,
                                          Error **errp)
{
    uint32_t free_schid;

    assert(schid);
    if (css_devno_used(cssid, ssid, devno)) {
        error_setg(errp, "Device %x.%x.%04x already exists",
                   cssid, ssid, devno);
        return false;
    }
    free_schid = css_find_free_subch(cssid, ssid);
    if (free_schid > MAX_SCHID) {
        error_setg(errp, "No free subchannel found for %x.%x.%04x",
                   cssid, ssid, devno);
        return false;
    }
    *schid = free_schid;
    return true;
}

/**
 * Return first free subchannel (id) and device number
 *
 * Locate the first free subchannel and first free device number in
 * any of the subchannel sets of the channel subsystem identified by
 * @p cssid. Return false if no free subchannel / device number could
 * be found. Otherwise set @p ssid, @p devno and @p schid to identify
 * the available subchannel and device number and return true.
 *
 * May modify @p ssid, @p devno and / or @p schid even if no free
 * subchannel / device number could be found.
 */
static bool css_find_free_subch_and_devno(uint8_t cssid, uint8_t *ssid,
                                          uint16_t *devno, uint16_t *schid,
                                          Error **errp)
{
    uint32_t free_schid, free_devno;

    assert(ssid && devno && schid);
    for (*ssid = 0; *ssid <= MAX_SSID; (*ssid)++) {
        free_schid = css_find_free_subch(cssid, *ssid);
        if (free_schid > MAX_SCHID) {
            continue;
        }
        free_devno = css_find_free_devno(cssid, *ssid, free_schid);
        if (free_devno > MAX_DEVNO) {
            continue;
        }
        *schid = free_schid;
        *devno = free_devno;
        return true;
    }
    error_setg(errp, "Virtual channel subsystem is full!");
    return false;
}
bool css_subch_visible(SubchDev *sch)
{
    if (sch->ssid > channel_subsys.max_ssid) {
        return false;
    }

    if (sch->cssid != channel_subsys.default_cssid) {
        return (channel_subsys.max_cssid > 0);
    }

    return true;
}

bool css_present(uint8_t cssid)
{
    return (channel_subsys.css[cssid] != NULL);
}

bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno)
{
    if (!channel_subsys.css[cssid]) {
        return false;
    }
    if (!channel_subsys.css[cssid]->sch_set[ssid]) {
        return false;
    }

    return !!test_bit(devno,
                      channel_subsys.css[cssid]->sch_set[ssid]->devnos_used);
}

void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
                      uint16_t devno, SubchDev *sch)
{
    CssImage *css;
    SubchSet *s_set;

    trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid,
                           devno);
    if (!channel_subsys.css[cssid]) {
        fprintf(stderr,
                "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n",
                __func__, cssid, ssid, schid);
        return;
    }
    css = channel_subsys.css[cssid];

    if (!css->sch_set[ssid]) {
        css->sch_set[ssid] = g_malloc0(sizeof(SubchSet));
    }
    s_set = css->sch_set[ssid];

    s_set->sch[schid] = sch;
    if (sch) {
        set_bit(schid, s_set->schids_used);
        set_bit(devno, s_set->devnos_used);
    } else {
        clear_bit(schid, s_set->schids_used);
        clear_bit(devno, s_set->devnos_used);
    }
}

void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid)
{
    CrwContainer *crw_cont;

    trace_css_crw(rsc, erc, rsid, chain ? "(chained)" : "");
    /* TODO: Maybe use a static crw pool? */
    crw_cont = g_try_malloc0(sizeof(CrwContainer));
    if (!crw_cont) {
        channel_subsys.crws_lost = true;
        return;
    }
    crw_cont->crw.flags = (rsc << 8) | erc;
    if (chain) {
        crw_cont->crw.flags |= CRW_FLAGS_MASK_C;
    }
    crw_cont->crw.rsid = rsid;
    if (channel_subsys.crws_lost) {
        crw_cont->crw.flags |= CRW_FLAGS_MASK_R;
        channel_subsys.crws_lost = false;
    }

    QTAILQ_INSERT_TAIL(&channel_subsys.pending_crws, crw_cont, sibling);

    if (channel_subsys.do_crw_mchk) {
        channel_subsys.do_crw_mchk = false;
        /* Inject crw pending machine check. */
        s390_crw_mchk();
    }
}
void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
                           int hotplugged, int add)
{
    uint8_t guest_cssid;
    bool chain_crw;

    if (add && !hotplugged) {
        return;
    }
    if (channel_subsys.max_cssid == 0) {
        /* Default cssid shows up as 0. */
        guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid;
    } else {
        /* Show real cssid to the guest. */
        guest_cssid = cssid;
    }
    /*
     * Only notify for higher subchannel sets/channel subsystems if the
     * guest has enabled it.
     */
    if ((ssid > channel_subsys.max_ssid) ||
        (guest_cssid > channel_subsys.max_cssid) ||
        ((channel_subsys.max_cssid == 0) &&
         (cssid != channel_subsys.default_cssid))) {
        return;
    }
    chain_crw = (channel_subsys.max_ssid > 0) ||
                (channel_subsys.max_cssid > 0);
    css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid);
    if (chain_crw) {
        css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0,
                      (guest_cssid << 8) | (ssid << 4));
    }
    /* RW_ERC_IPI --> clear pending interrupts */
    css_clear_io_interrupt(css_do_build_subchannel_id(cssid, ssid), schid);
}

void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
{
    /* TODO */
}

void css_generate_css_crws(uint8_t cssid)
{
    if (!channel_subsys.sei_pending) {
        css_queue_crw(CRW_RSC_CSS, 0, 0, cssid);
    }
    channel_subsys.sei_pending = true;
}

void css_clear_sei_pending(void)
{
    channel_subsys.sei_pending = false;
}

int css_enable_mcsse(void)
{
    trace_css_enable_facility("mcsse");
    channel_subsys.max_cssid = MAX_CSSID;
    return 0;
}

int css_enable_mss(void)
{
    trace_css_enable_facility("mss");
    channel_subsys.max_ssid = MAX_SSID;
    return 0;
}
void css_reset_sch(SubchDev *sch)
{
    PMCW *p = &sch->curr_status.pmcw;

    if ((p->flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) {
        sch->disable_cb(sch);
    }

    p->intparm = 0;
    p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
                  PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
                  PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
    p->flags |= PMCW_FLAGS_MASK_DNV;
    p->devno = sch->devno;
    p->pim = 0x80;
    p->lpm = p->pim;
    p->pnom = 0;
    p->lpum = 0;
    p->mbi = 0;
    p->pom = 0xff;
    p->pam = 0x80;
    p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
                  PMCW_CHARS_MASK_CSENSE);

    memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw));
    sch->curr_status.mba = 0;

    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    sch->thinint_active = false;
}

void css_reset(void)
{
    CrwContainer *crw_cont;

    /* Clean up monitoring. */
    channel_subsys.chnmon_active = false;
    channel_subsys.chnmon_area = 0;

    /* Clear pending CRWs. */
    while ((crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws))) {
        QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
        g_free(crw_cont);
    }
    channel_subsys.sei_pending = false;
    channel_subsys.do_crw_mchk = true;
    channel_subsys.crws_lost = false;

    /* Reset maximum ids. */
    channel_subsys.max_cssid = 0;
    channel_subsys.max_ssid = 0;
}
static void get_css_devid(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    Property *prop = opaque;
    CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
    char buffer[] = "xx.x.xxxx";
    char *p = buffer;
    int r;

    if (dev_id->valid) {
        r = snprintf(buffer, sizeof(buffer), "%02x.%1x.%04x", dev_id->cssid,
                     dev_id->ssid, dev_id->devid);
        assert(r == sizeof(buffer) - 1);

        /* drop leading zero */
        if (dev_id->cssid <= 0xf) {
            p++;
        }
    } else {
        snprintf(buffer, sizeof(buffer), "<unset>");
    }
    visit_type_str(v, name, &p, errp);
}

/*
 * parse <cssid>.<ssid>.<devid> and assert valid range for cssid/ssid
 */
static void set_css_devid(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    Property *prop = opaque;
    CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
    Error *local_err = NULL;
    char *str;
    int num, n1, n2;
    unsigned int cssid, ssid, devid;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_str(v, name, &str, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    num = sscanf(str, "%2x.%1x%n.%4x%n", &cssid, &ssid, &n1, &devid, &n2);
    if (num != 3 || (n2 - n1) != 5 || strlen(str) != n2) {
        error_set_from_qdev_prop_error(errp, EINVAL, dev, prop, str);
        goto out;
    }
    if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) {
        error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x",
                   cssid, ssid);
        goto out;
    }

    dev_id->cssid = cssid;
    dev_id->ssid = ssid;
    dev_id->devid = devid;
    dev_id->valid = true;

out:
    g_free(str);
}

PropertyInfo css_devid_propinfo = {
    .name = "str",
    .description = "Identifier of an I/O device in the channel "
                   "subsystem, example: fe.1.23ab",
    .get = get_css_devid,
    .set = set_css_devid,
};

PropertyInfo css_devid_ro_propinfo = {
    .name = "str",
    .description = "Read-only identifier of an I/O device in the channel "
                   "subsystem, example: fe.1.23ab",
    .get = get_css_devid,
};
SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
                         Error **errp)
{
    uint16_t schid = 0;
    SubchDev *sch;

    if (bus_id.valid) {
        if (is_virtual != (bus_id.cssid == VIRTUAL_CSSID)) {
            error_setg(errp, "cssid %hhx not valid for %s devices",
                       bus_id.cssid,
                       (is_virtual ? "virtual" : "non-virtual"));
            return NULL;
        }
    }

    if (bus_id.valid) {
        if (squash_mcss) {
            bus_id.cssid = channel_subsys.default_cssid;
        } else if (!channel_subsys.css[bus_id.cssid]) {
            css_create_css_image(bus_id.cssid, false);
        }

        if (!css_find_free_subch_for_devno(bus_id.cssid, bus_id.ssid,
                                           bus_id.devid, &schid, errp)) {
            return NULL;
        }
    } else if (squash_mcss || is_virtual) {
        bus_id.cssid = channel_subsys.default_cssid;

        if (!css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
                                           &bus_id.devid, &schid, errp)) {
            return NULL;
        }
    } else {
        for (bus_id.cssid = 0; bus_id.cssid < MAX_CSSID; ++bus_id.cssid) {
            if (bus_id.cssid == VIRTUAL_CSSID) {
                continue;
            }

            if (!channel_subsys.css[bus_id.cssid]) {
                css_create_css_image(bus_id.cssid, false);
            }

            if (css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
                                              &bus_id.devid, &schid,
                                              NULL)) {
                break;
            }
            if (bus_id.cssid == MAX_CSSID) {
                error_setg(errp, "Virtual channel subsystem is full!");
                return NULL;
            }
        }
    }

    sch = g_malloc0(sizeof(*sch));
    sch->cssid = bus_id.cssid;
    sch->ssid = bus_id.ssid;
    sch->devno = bus_id.devid;
    sch->schid = schid;
    css_subch_assign(sch->cssid, sch->ssid, schid, sch->devno, sch);
    return sch;
}
static int css_sch_get_chpids(SubchDev *sch, CssDevId *dev_id)
{
    char *fid_path;
    FILE *fd;
    uint32_t chpid[8];
    int i;
    PMCW *p = &sch->curr_status.pmcw;

    fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/chpids",
                               dev_id->cssid, dev_id->ssid, dev_id->devid);
    fd = fopen(fid_path, "r");
    if (fd == NULL) {
        error_report("%s: open %s failed", __func__, fid_path);
        g_free(fid_path);
        return -EINVAL;
    }

    if (fscanf(fd, "%x %x %x %x %x %x %x %x",
               &chpid[0], &chpid[1], &chpid[2], &chpid[3],
               &chpid[4], &chpid[5], &chpid[6], &chpid[7]) != 8) {
        fclose(fd);
        g_free(fid_path);
        return -EINVAL;
    }

    for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
        p->chpid[i] = chpid[i];
    }

    fclose(fd);
    g_free(fid_path);

    return 0;
}

static int css_sch_get_path_masks(SubchDev *sch, CssDevId *dev_id)
{
    char *fid_path;
    FILE *fd;
    uint32_t pim, pam, pom;
    PMCW *p = &sch->curr_status.pmcw;

    fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/pimpampom",
                               dev_id->cssid, dev_id->ssid, dev_id->devid);
    fd = fopen(fid_path, "r");
    if (fd == NULL) {
        error_report("%s: open %s failed", __func__, fid_path);
        g_free(fid_path);
        return -EINVAL;
    }

    if (fscanf(fd, "%x %x %x", &pim, &pam, &pom) != 3) {
        fclose(fd);
        g_free(fid_path);
        return -EINVAL;
    }

    p->pim = pim;
    p->pam = pam;
    p->pom = pom;
    fclose(fd);
    g_free(fid_path);

    return 0;
}

static int css_sch_get_chpid_type(uint8_t chpid, uint32_t *type,
                                  CssDevId *dev_id)
{
    char *fid_path;
    FILE *fd;

    fid_path = g_strdup_printf("/sys/devices/css%x/chp0.%02x/type",
                               dev_id->cssid, chpid);
    fd = fopen(fid_path, "r");
    if (fd == NULL) {
        error_report("%s: open %s failed", __func__, fid_path);
        g_free(fid_path);
        return -EINVAL;
    }

    if (fscanf(fd, "%x", type) != 1) {
        fclose(fd);
        g_free(fid_path);
        return -EINVAL;
    }

    fclose(fd);
    g_free(fid_path);

    return 0;
}
/*
 * We currently retrieve the real device information from sysfs to build the
 * guest subchannel information block without considering the migration feature.
 * We need to revisit this problem when we want to add migration support.
 */
int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id)
{
    CssImage *css = channel_subsys.css[sch->cssid];
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    uint32_t type;
    int i, ret;

    assert(css != NULL);
    memset(p, 0, sizeof(PMCW));
    p->flags |= PMCW_FLAGS_MASK_DNV;
    /* We are dealing with I/O subchannels only. */
    p->devno = sch->devno;

    /* Grab path mask from sysfs. */
    ret = css_sch_get_path_masks(sch, dev_id);
    if (ret) {
        return ret;
    }

    /* Grab chpids from sysfs. */
    ret = css_sch_get_chpids(sch, dev_id);
    if (ret) {
        return ret;
    }

    /* Build chpid type. */
    for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
        if (p->chpid[i] && !css->chpids[p->chpid[i]].in_use) {
            ret = css_sch_get_chpid_type(p->chpid[i], &type, dev_id);
            if (ret) {
                return ret;
            }
            css_add_chpid(sch->cssid, p->chpid[i], type, false);
        }
    }

    memset(s, 0, sizeof(SCSW));
    sch->curr_status.mba = 0;
    for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
        sch->curr_status.mda[i] = 0;
    }

    return 0;
}