exec: Make stl_*_phys input an AddressSpace
[qemu-kvm.git] / hw / s390x / css.c
blobcfa8a9bf9f9aae476fa26c78fa03dad67956d54b
1 /*
2 * Channel subsystem base support.
4 * Copyright 2012 IBM Corp.
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
7 * This work is licensed under the terms of the GNU GPL, version 2 or (at
8 * your option) any later version. See the COPYING file in the top-level
9 * directory.
12 #include <hw/qdev.h>
13 #include "qemu/bitops.h"
14 #include "exec/address-spaces.h"
15 #include "cpu.h"
16 #include "ioinst.h"
17 #include "css.h"
18 #include "trace.h"
/* A channel report word (CRW) queued for later retrieval via STCRW. */
typedef struct CrwContainer {
    CRW crw;
    QTAILQ_ENTRY(CrwContainer) sibling; /* link in pending_crws queue */
} CrwContainer;
/* Per-channel-path (chpid) state within one css image. */
typedef struct ChpInfo {
    uint8_t in_use;     /* non-zero once this chpid has been registered */
    uint8_t type;       /* channel path type reported to the guest */
    uint8_t is_virtual; /* non-zero for purely virtual channel paths */
} ChpInfo;
/* One subchannel set: subchannel pointers plus schid/devno usage bitmaps. */
typedef struct SubchSet {
    SubchDev *sch[MAX_SCHID + 1];
    unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)];
    unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)];
} SubchSet;
/* One channel subsystem image: its subchannel sets and channel paths. */
typedef struct CssImage {
    SubchSet *sch_set[MAX_SSID + 1]; /* allocated lazily in css_subch_assign */
    ChpInfo chpids[MAX_CHPID + 1];
} CssImage;
/* Global channel subsystem state. */
typedef struct ChannelSubSys {
    QTAILQ_HEAD(, CrwContainer) pending_crws; /* CRWs awaiting STCRW */
    bool do_crw_mchk;     /* inject a machine check for the next queued CRW? */
    bool crws_lost;       /* a CRW allocation failed; report overflow (R bit) */
    uint8_t max_cssid;    /* 0 until the guest enables mcsse */
    uint8_t max_ssid;     /* 0 until the guest enables mss */
    bool chnmon_active;   /* channel monitoring (SCHM) enabled? */
    uint64_t chnmon_area; /* guest address of the measurement area */
    CssImage *css[MAX_CSSID + 1];
    uint8_t default_cssid;
} ChannelSubSys;
/* Single global instance, allocated in css_init(). */
static ChannelSubSys *channel_subsys;
56 int css_create_css_image(uint8_t cssid, bool default_image)
58 trace_css_new_image(cssid, default_image ? "(default)" : "");
59 if (cssid > MAX_CSSID) {
60 return -EINVAL;
62 if (channel_subsys->css[cssid]) {
63 return -EBUSY;
65 channel_subsys->css[cssid] = g_malloc0(sizeof(CssImage));
66 if (default_image) {
67 channel_subsys->default_cssid = cssid;
69 return 0;
72 uint16_t css_build_subchannel_id(SubchDev *sch)
74 if (channel_subsys->max_cssid > 0) {
75 return (sch->cssid << 8) | (1 << 3) | (sch->ssid << 1) | 1;
77 return (sch->ssid << 1) | 1;
/* Deliver an I/O interrupt for @sch to cpu 0 (the only cpu we target). */
static void css_inject_io_interrupt(SubchDev *sch)
{
    S390CPU *cpu = s390_cpu_addr2state(0);
    /* The ISC lives in bits masked by PMCW_FLAGS_MASK_ISC (>> 11). */
    uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;

    trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
                           sch->curr_status.pmcw.intparm, isc, "");
    s390_io_interrupt(cpu,
                      css_build_subchannel_id(sch),
                      sch->schid,
                      sch->curr_status.pmcw.intparm,
                      isc << 27);
}
/*
 * Post an unsolicited interrupt for @sch, but only if the subchannel is
 * not already status pending (in which case the guest will pick the
 * status up anyway).
 */
void css_conditional_io_interrupt(SubchDev *sch)
{
    /*
     * If the subchannel is not currently status pending, make it pending
     * with alert status.
     */
    if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) {
        S390CPU *cpu = s390_cpu_addr2state(0);
        uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;

        trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
                               sch->curr_status.pmcw.intparm, isc,
                               "(unsolicited)");
        sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        sch->curr_status.scsw.ctrl |=
            SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        /* Inject an I/O interrupt. */
        s390_io_interrupt(cpu,
                          css_build_subchannel_id(sch),
                          sch->schid,
                          sch->curr_status.pmcw.intparm,
                          isc << 27);
    }
}
119 static void sch_handle_clear_func(SubchDev *sch)
121 PMCW *p = &sch->curr_status.pmcw;
122 SCSW *s = &sch->curr_status.scsw;
123 int path;
125 /* Path management: In our simple css, we always choose the only path. */
126 path = 0x80;
128 /* Reset values prior to 'issuing the clear signal'. */
129 p->lpum = 0;
130 p->pom = 0xff;
131 s->flags &= ~SCSW_FLAGS_MASK_PNO;
133 /* We always 'attempt to issue the clear signal', and we always succeed. */
134 sch->orb = NULL;
135 sch->channel_prog = 0x0;
136 sch->last_cmd_valid = false;
137 s->ctrl &= ~SCSW_ACTL_CLEAR_PEND;
138 s->ctrl |= SCSW_STCTL_STATUS_PEND;
140 s->dstat = 0;
141 s->cstat = 0;
142 p->lpum = path;
/* Perform the halt function: stop channel-program execution and post status. */
static void sch_handle_halt_func(SubchDev *sch)
{

    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int path;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    /* We always 'attempt to issue the halt signal', and we always succeed. */
    sch->orb = NULL;
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    s->ctrl &= ~SCSW_ACTL_HALT_PEND;
    s->ctrl |= SCSW_STCTL_STATUS_PEND;

    /*
     * Device end is indicated when the subchannel/device was active, or
     * when there was no start pending/suspended program to halt.
     */
    if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
        !((s->ctrl & SCSW_ACTL_START_PEND) ||
          (s->ctrl & SCSW_ACTL_SUSP))) {
        s->dstat = SCSW_DSTAT_DEVICE_END;
    }
    s->cstat = 0;
    p->lpum = path;

}
173 static void copy_sense_id_to_guest(SenseId *dest, SenseId *src)
175 int i;
177 dest->reserved = src->reserved;
178 dest->cu_type = cpu_to_be16(src->cu_type);
179 dest->cu_model = src->cu_model;
180 dest->dev_type = cpu_to_be16(src->dev_type);
181 dest->dev_model = src->dev_model;
182 dest->unused = src->unused;
183 for (i = 0; i < ARRAY_SIZE(dest->ciw); i++) {
184 dest->ciw[i].type = src->ciw[i].type;
185 dest->ciw[i].command = src->ciw[i].command;
186 dest->ciw[i].count = cpu_to_be16(src->ciw[i].count);
190 static CCW1 copy_ccw_from_guest(hwaddr addr)
192 CCW1 tmp;
193 CCW1 ret;
195 cpu_physical_memory_read(addr, &tmp, sizeof(tmp));
196 ret.cmd_code = tmp.cmd_code;
197 ret.flags = tmp.flags;
198 ret.count = be16_to_cpu(tmp.count);
199 ret.cda = be32_to_cpu(tmp.cda);
201 return ret;
/*
 * Execute a single CCW of the channel program at @ccw_addr.
 * Returns 0 on success, -EAGAIN when processing should continue with the
 * next CCW (command chaining / TIC), -EINPROGRESS when the program
 * suspends, or a negative error that the caller maps to ending status.
 */
static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr)
{
    int ret;
    bool check_len;
    int len;
    CCW1 ccw;

    if (!ccw_addr) {
        return -EIO;
    }

    ccw = copy_ccw_from_guest(ccw_addr);

    /* Check for invalid command codes. */
    if ((ccw.cmd_code & 0x0f) == 0) {
        return -EINVAL;
    }
    if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) &&
        ((ccw.cmd_code & 0xf0) != 0)) {
        return -EINVAL;
    }

    if (ccw.flags & CCW_FLAG_SUSPEND) {
        return -EINPROGRESS;
    }

    /* Length checking is skipped only with SLI set and no data chaining. */
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_NOOP:
        /* Nothing to do. */
        ret = 0;
        break;
    case CCW_CMD_BASIC_SENSE:
        if (check_len) {
            if (ccw.count != sizeof(sch->sense_data)) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, sizeof(sch->sense_data));
        cpu_physical_memory_write(ccw.cda, sch->sense_data, len);
        sch->curr_status.scsw.count = ccw.count - len;
        /* Sense data is consumed once read. */
        memset(sch->sense_data, 0, sizeof(sch->sense_data));
        ret = 0;
        break;
    case CCW_CMD_SENSE_ID:
    {
        SenseId sense_id;

        copy_sense_id_to_guest(&sense_id, &sch->id);
        /* Sense ID information is device specific. */
        if (check_len) {
            if (ccw.count != sizeof(sense_id)) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, sizeof(sense_id));
        /*
         * Only indicate 0xff in the first sense byte if we actually
         * have enough place to store at least bytes 0-3.
         */
        if (len >= 4) {
            sense_id.reserved = 0xff;
        } else {
            sense_id.reserved = 0;
        }
        cpu_physical_memory_write(ccw.cda, &sense_id, len);
        sch->curr_status.scsw.count = ccw.count - len;
        ret = 0;
        break;
    }
    case CCW_CMD_TIC:
        /* A TIC must not follow another TIC and takes no flags. */
        if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) {
            ret = -EINVAL;
            break;
        }
        if (ccw.flags & (CCW_FLAG_CC | CCW_FLAG_DC)) {
            ret = -EINVAL;
            break;
        }
        sch->channel_prog = ccw.cda;
        ret = -EAGAIN;
        break;
    default:
        if (sch->ccw_cb) {
            /* Handle device specific commands. */
            ret = sch->ccw_cb(sch, ccw);
        } else {
            ret = -ENOSYS;
        }
        break;
    }
    sch->last_cmd = ccw;
    sch->last_cmd_valid = true;
    if (ret == 0) {
        if (ccw.flags & CCW_FLAG_CC) {
            /* Command chaining: advance to the next 8-byte CCW. */
            sch->channel_prog += 8;
            ret = -EAGAIN;
        }
    }

    return ret;
}
/*
 * Perform the start function: run the channel program designated by the
 * orb (or resume a suspended one) and translate the outcome of
 * css_interpret_ccw() into scsw ending status.
 */
static void sch_handle_start_func(SubchDev *sch)
{

    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    ORB *orb = sch->orb;
    int path;
    int ret;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    if (!(s->ctrl & SCSW_ACTL_SUSP)) {
        /* Look at the orb and try to execute the channel program. */
        p->intparm = orb->intparm;
        if (!(orb->lpm & path)) {
            /* Generate a deferred cc 3 condition. */
            s->flags |= SCSW_FLAGS_MASK_CC;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            return;
        }
    } else {
        /* Resume from suspension: drop suspend/resume-pending bits. */
        s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
    }
    sch->last_cmd_valid = false;
    do {
        ret = css_interpret_ccw(sch, sch->channel_prog);
        switch (ret) {
        case -EAGAIN:
            /* ccw chain, continue processing */
            break;
        case 0:
            /* success */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                SCSW_STCTL_STATUS_PEND;
            s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
            break;
        case -ENOSYS:
            /* unsupported command, generate unit check (command reject) */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->dstat = SCSW_DSTAT_UNIT_CHECK;
            /* Set sense bit 0 in ecw0. */
            sch->sense_data[0] = 0x80;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            break;
        case -EFAULT:
            /* memory problem, generate channel data check */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->cstat = SCSW_CSTAT_DATA_CHECK;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            break;
        case -EBUSY:
            /* subchannel busy, generate deferred cc 1 */
            s->flags &= ~SCSW_FLAGS_MASK_CC;
            s->flags |= (1 << 8);
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            break;
        case -EINPROGRESS:
            /* channel program has been suspended */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->ctrl |= SCSW_ACTL_SUSP;
            break;
        default:
            /* error, generate channel program check */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->cstat = SCSW_CSTAT_PROG_CHECK;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            break;
        }
    } while (ret == -EAGAIN);

}
395 * On real machines, this would run asynchronously to the main vcpus.
396 * We might want to make some parts of the ssch handling (interpreting
397 * read/writes) asynchronous later on if we start supporting more than
398 * our current very simple devices.
400 static void do_subchannel_work(SubchDev *sch)
403 SCSW *s = &sch->curr_status.scsw;
405 if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
406 sch_handle_clear_func(sch);
407 } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
408 sch_handle_halt_func(sch);
409 } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
410 sch_handle_start_func(sch);
411 } else {
412 /* Cannot happen. */
413 return;
415 css_inject_io_interrupt(sch);
418 static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
420 int i;
422 dest->intparm = cpu_to_be32(src->intparm);
423 dest->flags = cpu_to_be16(src->flags);
424 dest->devno = cpu_to_be16(src->devno);
425 dest->lpm = src->lpm;
426 dest->pnom = src->pnom;
427 dest->lpum = src->lpum;
428 dest->pim = src->pim;
429 dest->mbi = cpu_to_be16(src->mbi);
430 dest->pom = src->pom;
431 dest->pam = src->pam;
432 for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
433 dest->chpid[i] = src->chpid[i];
435 dest->chars = cpu_to_be32(src->chars);
438 static void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
440 dest->flags = cpu_to_be16(src->flags);
441 dest->ctrl = cpu_to_be16(src->ctrl);
442 dest->cpa = cpu_to_be32(src->cpa);
443 dest->dstat = src->dstat;
444 dest->cstat = src->cstat;
445 dest->count = cpu_to_be16(src->count);
448 static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src)
450 int i;
452 copy_pmcw_to_guest(&dest->pmcw, &src->pmcw);
453 copy_scsw_to_guest(&dest->scsw, &src->scsw);
454 dest->mba = cpu_to_be64(src->mba);
455 for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
456 dest->mda[i] = src->mda[i];
460 int css_do_stsch(SubchDev *sch, SCHIB *schib)
462 /* Use current status. */
463 copy_schib_to_guest(schib, &sch->curr_status);
464 return 0;
467 static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src)
469 int i;
471 dest->intparm = be32_to_cpu(src->intparm);
472 dest->flags = be16_to_cpu(src->flags);
473 dest->devno = be16_to_cpu(src->devno);
474 dest->lpm = src->lpm;
475 dest->pnom = src->pnom;
476 dest->lpum = src->lpum;
477 dest->pim = src->pim;
478 dest->mbi = be16_to_cpu(src->mbi);
479 dest->pom = src->pom;
480 dest->pam = src->pam;
481 for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
482 dest->chpid[i] = src->chpid[i];
484 dest->chars = be32_to_cpu(src->chars);
487 static void copy_scsw_from_guest(SCSW *dest, const SCSW *src)
489 dest->flags = be16_to_cpu(src->flags);
490 dest->ctrl = be16_to_cpu(src->ctrl);
491 dest->cpa = be32_to_cpu(src->cpa);
492 dest->dstat = src->dstat;
493 dest->cstat = src->cstat;
494 dest->count = be16_to_cpu(src->count);
497 static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
499 int i;
501 copy_pmcw_from_guest(&dest->pmcw, &src->pmcw);
502 copy_scsw_from_guest(&dest->scsw, &src->scsw);
503 dest->mba = be64_to_cpu(src->mba);
504 for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
505 dest->mda[i] = src->mda[i];
/*
 * Modify-subchannel: merge the program-modifiable pmcw fields of the
 * guest-provided schib into the current status. Returns 0 on success,
 * -EINPROGRESS when status pending, -EBUSY while a function is active.
 */
int css_do_msch(SubchDev *sch, SCHIB *orig_schib)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;
    SCHIB schib;

    /* A subchannel without a valid device number is silently ignored. */
    if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) {
        ret = 0;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl &
        (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    copy_schib_from_guest(&schib, orig_schib);
    /* Only update the program-modifiable fields. */
    p->intparm = schib.pmcw.intparm;
    p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
                  PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
                  PMCW_FLAGS_MASK_MP);
    p->flags |= schib.pmcw.flags &
            (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
             PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
             PMCW_FLAGS_MASK_MP);
    p->lpm = schib.pmcw.lpm;
    p->mbi = schib.pmcw.mbi;
    p->pom = schib.pmcw.pom;
    p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
    p->chars |= schib.pmcw.chars &
            (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
    sch->curr_status.mba = schib.mba;

    ret = 0;

out:
    return ret;
}
/*
 * Cancel-subchannel: withdraw a start function that has not yet begun
 * executing at the device. Returns 0 on success, -ENODEV for an
 * invalid/disabled subchannel, -EINPROGRESS or -EBUSY per architecture.
 */
int css_do_xsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
        ret = -ENODEV;
        goto out;
    }

    /* Only cancellable while start is pending/suspended but not active. */
    if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) ||
        ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
        (!(s->ctrl &
           (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) ||
        (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & SCSW_CTRL_MASK_STCTL) {
        ret = -EBUSY;
        goto out;
    }

    /* Cancel the current operation. */
    s->ctrl &= ~(SCSW_FCTL_START_FUNC |
                 SCSW_ACTL_RESUME_PEND |
                 SCSW_ACTL_START_PEND |
                 SCSW_ACTL_SUSP);
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    sch->orb = NULL;
    s->dstat = 0;
    s->cstat = 0;
    ret = 0;

out:
    return ret;
}
597 int css_do_csch(SubchDev *sch)
599 SCSW *s = &sch->curr_status.scsw;
600 PMCW *p = &sch->curr_status.pmcw;
601 int ret;
603 if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
604 ret = -ENODEV;
605 goto out;
608 /* Trigger the clear function. */
609 s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
610 s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_CLEAR_FUNC;
612 do_subchannel_work(sch);
613 ret = 0;
615 out:
616 return ret;
/*
 * Halt-subchannel: trigger the halt function for @sch. Returns 0 on
 * success, -ENODEV/-EINPROGRESS/-EBUSY per the architected conditions.
 */
int css_do_hsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
        ret = -ENODEV;
        goto out;
    }

    if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) ||
        (s->ctrl & (SCSW_STCTL_PRIMARY |
                    SCSW_STCTL_SECONDARY |
                    SCSW_STCTL_ALERT))) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    /* Trigger the halt function. */
    s->ctrl |= SCSW_FCTL_HALT_FUNC;
    s->ctrl &= ~SCSW_FCTL_START_FUNC;
    /* Intermediate status for a fully active subchannel is withdrawn. */
    if (((s->ctrl & SCSW_CTRL_MASK_ACTL) ==
         (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) &&
        ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) {
        s->ctrl &= ~SCSW_STCTL_STATUS_PEND;
    }
    s->ctrl |= SCSW_ACTL_HALT_PEND;

    do_subchannel_work(sch);
    ret = 0;

out:
    return ret;
}
660 static void css_update_chnmon(SubchDev *sch)
662 if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) {
663 /* Not active. */
664 return;
666 /* The counter is conveniently located at the beginning of the struct. */
667 if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) {
668 /* Format 1, per-subchannel area. */
669 uint32_t count;
671 count = ldl_phys(&address_space_memory, sch->curr_status.mba);
672 count++;
673 stl_phys(&address_space_memory, sch->curr_status.mba, count);
674 } else {
675 /* Format 0, global area. */
676 uint32_t offset;
677 uint16_t count;
679 offset = sch->curr_status.pmcw.mbi << 5;
680 count = lduw_phys(&address_space_memory,
681 channel_subsys->chnmon_area + offset);
682 count++;
683 stw_phys(channel_subsys->chnmon_area + offset, count);
/*
 * Start-subchannel: latch @orb and kick off channel program execution.
 * Returns 0 on success, -ENODEV/-EINPROGRESS/-EBUSY per architecture.
 */
int css_do_ssch(SubchDev *sch, ORB *orb)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
        ret = -ENODEV;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & (SCSW_FCTL_START_FUNC |
                   SCSW_FCTL_HALT_FUNC |
                   SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    /* If monitoring is active, update counter. */
    if (channel_subsys->chnmon_active) {
        css_update_chnmon(sch);
    }
    sch->orb = orb;
    sch->channel_prog = orb->cpa;
    /* Trigger the start function. */
    s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
    s->flags &= ~SCSW_FLAGS_MASK_PNO;

    do_subchannel_work(sch);
    ret = 0;

out:
    return ret;
}
727 static void copy_irb_to_guest(IRB *dest, const IRB *src)
729 int i;
731 copy_scsw_to_guest(&dest->scsw, &src->scsw);
733 for (i = 0; i < ARRAY_SIZE(dest->esw); i++) {
734 dest->esw[i] = cpu_to_be32(src->esw[i]);
736 for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
737 dest->ecw[i] = cpu_to_be32(src->ecw[i]);
739 for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
740 dest->emw[i] = cpu_to_be32(src->emw[i]);
/*
 * Test-subchannel: build an irb from the current status, store it to the
 * guest, and clear the architecturally appropriate conditions on the
 * subchannel. Returns the condition code (0 when status was pending,
 * 1 otherwise, 3 for an invalid subchannel).
 */
int css_do_tsch(SubchDev *sch, IRB *target_irb)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    uint16_t stctl;
    uint16_t fctl;
    uint16_t actl;
    IRB irb;
    int ret;

    if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
        ret = 3;
        goto out;
    }

    /* Snapshot the control fields before we start clearing them. */
    stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
    fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
    actl = s->ctrl & SCSW_CTRL_MASK_ACTL;

    /* Prepare the irb for the guest. */
    memset(&irb, 0, sizeof(IRB));

    /* Copy scsw from current status. */
    memcpy(&irb.scsw, s, sizeof(SCSW));
    if (stctl & SCSW_STCTL_STATUS_PEND) {
        if (s->cstat & (SCSW_CSTAT_DATA_CHECK |
                        SCSW_CSTAT_CHN_CTRL_CHK |
                        SCSW_CSTAT_INTF_CTRL_CHK)) {
            irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF;
            irb.esw[0] = 0x04804000;
        } else {
            irb.esw[0] = 0x00800000;
        }
        /* If a unit check is pending, copy sense data. */
        if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) &&
            (p->chars & PMCW_CHARS_MASK_CSENSE)) {
            irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
            memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data));
            irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8);
        }
    }
    /* Store the irb to the guest. */
    copy_irb_to_guest(target_irb, &irb);

    /* Clear conditions on subchannel, if applicable. */
    if (stctl & SCSW_STCTL_STATUS_PEND) {
        s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
        /* Function control clears except for intermediate status alone,
         * unless a halted, suspended program is involved. */
        if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) ||
            ((fctl & SCSW_FCTL_HALT_FUNC) &&
             (actl & SCSW_ACTL_SUSP))) {
            s->ctrl &= ~SCSW_CTRL_MASK_FCTL;
        }
        if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) {
            s->flags &= ~SCSW_FLAGS_MASK_PNO;
            s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
                         SCSW_ACTL_START_PEND |
                         SCSW_ACTL_HALT_PEND |
                         SCSW_ACTL_CLEAR_PEND |
                         SCSW_ACTL_SUSP);
        } else {
            if ((actl & SCSW_ACTL_SUSP) &&
                (fctl & SCSW_FCTL_START_FUNC)) {
                s->flags &= ~SCSW_FLAGS_MASK_PNO;
                if (fctl & SCSW_FCTL_HALT_FUNC) {
                    s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
                                 SCSW_ACTL_START_PEND |
                                 SCSW_ACTL_HALT_PEND |
                                 SCSW_ACTL_CLEAR_PEND |
                                 SCSW_ACTL_SUSP);
                } else {
                    s->ctrl &= ~SCSW_ACTL_RESUME_PEND;
                }
            }
        }
        /* Clear pending sense data. */
        if (p->chars & PMCW_CHARS_MASK_CSENSE) {
            memset(sch->sense_data, 0 , sizeof(sch->sense_data));
        }
    }

    ret = ((stctl & SCSW_STCTL_STATUS_PEND) == 0);

out:
    return ret;
}
830 static void copy_crw_to_guest(CRW *dest, const CRW *src)
832 dest->flags = cpu_to_be16(src->flags);
833 dest->rsid = cpu_to_be16(src->rsid);
836 int css_do_stcrw(CRW *crw)
838 CrwContainer *crw_cont;
839 int ret;
841 crw_cont = QTAILQ_FIRST(&channel_subsys->pending_crws);
842 if (crw_cont) {
843 QTAILQ_REMOVE(&channel_subsys->pending_crws, crw_cont, sibling);
844 copy_crw_to_guest(crw, &crw_cont->crw);
845 g_free(crw_cont);
846 ret = 0;
847 } else {
848 /* List was empty, turn crw machine checks on again. */
849 memset(crw, 0, sizeof(*crw));
850 channel_subsys->do_crw_mchk = true;
851 ret = 1;
854 return ret;
/* Test-pending-interruption stub; int_code and lowcore are unused here. */
int css_do_tpi(IOIntCode *int_code, int lowcore)
{
    /* No pending interrupts for !KVM. */
    return 0;
}
863 int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
864 int rfmt, void *buf)
866 int i, desc_size;
867 uint32_t words[8];
868 uint32_t chpid_type_word;
869 CssImage *css;
871 if (!m && !cssid) {
872 css = channel_subsys->css[channel_subsys->default_cssid];
873 } else {
874 css = channel_subsys->css[cssid];
876 if (!css) {
877 return 0;
879 desc_size = 0;
880 for (i = f_chpid; i <= l_chpid; i++) {
881 if (css->chpids[i].in_use) {
882 chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i;
883 if (rfmt == 0) {
884 words[0] = cpu_to_be32(chpid_type_word);
885 words[1] = 0;
886 memcpy(buf + desc_size, words, 8);
887 desc_size += 8;
888 } else if (rfmt == 1) {
889 words[0] = cpu_to_be32(chpid_type_word);
890 words[1] = 0;
891 words[2] = 0;
892 words[3] = 0;
893 words[4] = 0;
894 words[5] = 0;
895 words[6] = 0;
896 words[7] = 0;
897 memcpy(buf + desc_size, words, 32);
898 desc_size += 32;
902 return desc_size;
905 void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
907 /* dct is currently ignored (not really meaningful for our devices) */
908 /* TODO: Don't ignore mbk. */
909 if (update && !channel_subsys->chnmon_active) {
910 /* Enable measuring. */
911 channel_subsys->chnmon_area = mbo;
912 channel_subsys->chnmon_active = true;
914 if (!update && channel_subsys->chnmon_active) {
915 /* Disable measuring. */
916 channel_subsys->chnmon_area = 0;
917 channel_subsys->chnmon_active = false;
/*
 * Resume-subchannel: resume a suspended channel program. Returns 0 on
 * success, -ENODEV/-EINPROGRESS/-EINVAL per the architected conditions.
 */
int css_do_rsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
        ret = -ENODEV;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    /* Only a suspended (not already resuming) start function qualifies. */
    if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
        (s->ctrl & SCSW_ACTL_RESUME_PEND) ||
        (!(s->ctrl & SCSW_ACTL_SUSP))) {
        ret = -EINVAL;
        goto out;
    }

    /* If monitoring is active, update counter. */
    if (channel_subsys->chnmon_active) {
        css_update_chnmon(sch);
    }

    s->ctrl |= SCSW_ACTL_RESUME_PEND;
    do_subchannel_work(sch);
    ret = 0;

out:
    return ret;
}
/*
 * Reset-channel-path: acknowledge a channel path reset by queueing the
 * initialized CRW(s). Only virtual channel paths are supported.
 */
int css_do_rchp(uint8_t cssid, uint8_t chpid)
{
    uint8_t real_cssid;

    if (cssid > channel_subsys->max_cssid) {
        return -EINVAL;
    }
    if (channel_subsys->max_cssid == 0) {
        real_cssid = channel_subsys->default_cssid;
    } else {
        real_cssid = cssid;
    }
    if (!channel_subsys->css[real_cssid]) {
        return -EINVAL;
    }

    if (!channel_subsys->css[real_cssid]->chpids[chpid].in_use) {
        return -ENODEV;
    }

    if (!channel_subsys->css[real_cssid]->chpids[chpid].is_virtual) {
        fprintf(stderr,
                "rchp unsupported for non-virtual chpid %x.%02x!\n",
                real_cssid, chpid);
        return -ENODEV;
    }

    /* We don't really use a channel path, so we're done here. */
    css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT,
                  channel_subsys->max_cssid > 0 ? 1 : 0, chpid);
    /* With mcsse, a second, chained CRW identifies the css image. */
    if (channel_subsys->max_cssid > 0) {
        css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 0, real_cssid << 8);
    }
    return 0;
}
/*
 * Return true if @schid is at or beyond the last in-use subchannel id of
 * the addressed subchannel set (or the set does not exist at all).
 */
bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid)
{
    SubchSet *set;
    uint8_t real_cssid;

    real_cssid = (!m && (cssid == 0)) ? channel_subsys->default_cssid : cssid;
    if (real_cssid > MAX_CSSID || ssid > MAX_SSID ||
        !channel_subsys->css[real_cssid] ||
        !channel_subsys->css[real_cssid]->sch_set[ssid]) {
        return true;
    }
    set = channel_subsys->css[real_cssid]->sch_set[ssid];
    /*
     * NOTE(review): find_last_bit() expects a size in *bits*, but
     * (MAX_SCHID + 1) / sizeof(unsigned long) is neither the bit count
     * nor the long count - this appears to scan only a fraction of the
     * schids_used bitmap. Verify intent before relying on this for high
     * subchannel ids.
     */
    return schid > find_last_bit(set->schids_used,
                                 (MAX_SCHID + 1) / sizeof(unsigned long));
}
1009 static int css_add_virtual_chpid(uint8_t cssid, uint8_t chpid, uint8_t type)
1011 CssImage *css;
1013 trace_css_chpid_add(cssid, chpid, type);
1014 if (cssid > MAX_CSSID) {
1015 return -EINVAL;
1017 css = channel_subsys->css[cssid];
1018 if (!css) {
1019 return -EINVAL;
1021 if (css->chpids[chpid].in_use) {
1022 return -EEXIST;
1024 css->chpids[chpid].in_use = 1;
1025 css->chpids[chpid].type = type;
1026 css->chpids[chpid].is_virtual = 1;
1028 css_generate_chp_crws(cssid, chpid);
1030 return 0;
1033 void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type)
1035 PMCW *p = &sch->curr_status.pmcw;
1036 SCSW *s = &sch->curr_status.scsw;
1037 int i;
1038 CssImage *css = channel_subsys->css[sch->cssid];
1040 assert(css != NULL);
1041 memset(p, 0, sizeof(PMCW));
1042 p->flags |= PMCW_FLAGS_MASK_DNV;
1043 p->devno = sch->devno;
1044 /* single path */
1045 p->pim = 0x80;
1046 p->pom = 0xff;
1047 p->pam = 0x80;
1048 p->chpid[0] = chpid;
1049 if (!css->chpids[chpid].in_use) {
1050 css_add_virtual_chpid(sch->cssid, chpid, type);
1053 memset(s, 0, sizeof(SCSW));
1054 sch->curr_status.mba = 0;
1055 for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
1056 sch->curr_status.mda[i] = 0;
1060 SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid)
1062 uint8_t real_cssid;
1064 real_cssid = (!m && (cssid == 0)) ? channel_subsys->default_cssid : cssid;
1066 if (!channel_subsys->css[real_cssid]) {
1067 return NULL;
1070 if (!channel_subsys->css[real_cssid]->sch_set[ssid]) {
1071 return NULL;
1074 return channel_subsys->css[real_cssid]->sch_set[ssid]->sch[schid];
1077 bool css_subch_visible(SubchDev *sch)
1079 if (sch->ssid > channel_subsys->max_ssid) {
1080 return false;
1083 if (sch->cssid != channel_subsys->default_cssid) {
1084 return (channel_subsys->max_cssid > 0);
1087 return true;
1090 bool css_present(uint8_t cssid)
1092 return (channel_subsys->css[cssid] != NULL);
1095 bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno)
1097 if (!channel_subsys->css[cssid]) {
1098 return false;
1100 if (!channel_subsys->css[cssid]->sch_set[ssid]) {
1101 return false;
1104 return !!test_bit(devno,
1105 channel_subsys->css[cssid]->sch_set[ssid]->devnos_used);
1108 void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
1109 uint16_t devno, SubchDev *sch)
1111 CssImage *css;
1112 SubchSet *s_set;
1114 trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid,
1115 devno);
1116 if (!channel_subsys->css[cssid]) {
1117 fprintf(stderr,
1118 "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n",
1119 __func__, cssid, ssid, schid);
1120 return;
1122 css = channel_subsys->css[cssid];
1124 if (!css->sch_set[ssid]) {
1125 css->sch_set[ssid] = g_malloc0(sizeof(SubchSet));
1127 s_set = css->sch_set[ssid];
1129 s_set->sch[schid] = sch;
1130 if (sch) {
1131 set_bit(schid, s_set->schids_used);
1132 set_bit(devno, s_set->devnos_used);
1133 } else {
1134 clear_bit(schid, s_set->schids_used);
1135 clear_bit(devno, s_set->devnos_used);
/*
 * Queue a channel report word and, if machine checks are armed, inject a
 * crw-pending machine check. Allocation failure sets crws_lost so the
 * overflow is reported on the next successfully queued CRW.
 */
void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid)
{
    CrwContainer *crw_cont;

    trace_css_crw(rsc, erc, rsid, chain ? "(chained)" : "");
    /* TODO: Maybe use a static crw pool? */
    crw_cont = g_try_malloc0(sizeof(CrwContainer));
    if (!crw_cont) {
        channel_subsys->crws_lost = true;
        return;
    }
    crw_cont->crw.flags = (rsc << 8) | erc;
    if (chain) {
        crw_cont->crw.flags |= CRW_FLAGS_MASK_C;
    }
    crw_cont->crw.rsid = rsid;
    if (channel_subsys->crws_lost) {
        /* Report the earlier overflow via the R (recovery) bit. */
        crw_cont->crw.flags |= CRW_FLAGS_MASK_R;
        channel_subsys->crws_lost = false;
    }

    QTAILQ_INSERT_TAIL(&channel_subsys->pending_crws, crw_cont, sibling);

    if (channel_subsys->do_crw_mchk) {
        S390CPU *cpu = s390_cpu_addr2state(0);

        /* Re-armed only once the guest drains the queue via STCRW. */
        channel_subsys->do_crw_mchk = false;
        /* Inject crw pending machine check. */
        s390_crw_mchk(cpu);
    }
}
/*
 * Queue subchannel CRW(s) for a hotplug event, honoring the facilities
 * (mss/mcsse) the guest has enabled. Non-hotplug additions are silent.
 */
void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
                           int hotplugged, int add)
{
    uint8_t guest_cssid;
    bool chain_crw;

    if (add && !hotplugged) {
        return;
    }
    if (channel_subsys->max_cssid == 0) {
        /* Default cssid shows up as 0. */
        guest_cssid = (cssid == channel_subsys->default_cssid) ? 0 : cssid;
    } else {
        /* Show real cssid to the guest. */
        guest_cssid = cssid;
    }
    /*
     * Only notify for higher subchannel sets/channel subsystems if the
     * guest has enabled it.
     */
    if ((ssid > channel_subsys->max_ssid) ||
        (guest_cssid > channel_subsys->max_cssid) ||
        ((channel_subsys->max_cssid == 0) &&
         (cssid != channel_subsys->default_cssid))) {
        return;
    }
    chain_crw = (channel_subsys->max_ssid > 0) ||
        (channel_subsys->max_cssid > 0);
    css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid);
    if (chain_crw) {
        /* Chained CRW identifying the css image and subchannel set. */
        css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0,
                      (guest_cssid << 8) | (ssid << 4));
    }
}
/* Channel path CRW generation is not implemented yet. */
void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
{
    /* TODO */
}
1211 int css_enable_mcsse(void)
1213 trace_css_enable_facility("mcsse");
1214 channel_subsys->max_cssid = MAX_CSSID;
1215 return 0;
1218 int css_enable_mss(void)
1220 trace_css_enable_facility("mss");
1221 channel_subsys->max_ssid = MAX_SSID;
1222 return 0;
/* Allocate and initialize the global channel subsystem state. */
static void css_init(void)
{
    channel_subsys = g_malloc0(sizeof(*channel_subsys));
    QTAILQ_INIT(&channel_subsys->pending_crws);
    channel_subsys->do_crw_mchk = true;
    channel_subsys->crws_lost = false;
    channel_subsys->chnmon_active = false;
}
/* Run at machine init, before any subchannels can be registered. */
machine_init(css_init);
/* Reset one subchannel to its architected initial state. */
void css_reset_sch(SubchDev *sch)
{
    PMCW *p = &sch->curr_status.pmcw;

    p->intparm = 0;
    p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
                  PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
                  PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
    p->flags |= PMCW_FLAGS_MASK_DNV;
    p->devno = sch->devno;
    /* Single path, matching css_sch_build_virtual_schib(). */
    p->pim = 0x80;
    p->lpm = p->pim;
    p->pnom = 0;
    p->lpum = 0;
    p->mbi = 0;
    p->pom = 0xff;
    p->pam = 0x80;
    p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
                  PMCW_CHARS_MASK_CSENSE);

    memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw));
    sch->curr_status.mba = 0;

    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    sch->orb = NULL;
}
/* Reset the whole channel subsystem to its initial state. */
void css_reset(void)
{
    CrwContainer *crw_cont;

    /* Clean up monitoring. */
    channel_subsys->chnmon_active = false;
    channel_subsys->chnmon_area = 0;

    /* Clear pending CRWs. */
    while ((crw_cont = QTAILQ_FIRST(&channel_subsys->pending_crws))) {
        QTAILQ_REMOVE(&channel_subsys->pending_crws, crw_cont, sibling);
        g_free(crw_cont);
    }
    channel_subsys->do_crw_mchk = true;
    channel_subsys->crws_lost = false;

    /* Reset maximum ids. */
    channel_subsys->max_cssid = 0;
    channel_subsys->max_ssid = 0;
}