4 * handles SCLP event types
5 * - Signal Quiesce - system power down
6 * - ASCII Console Data - VT220 read and write
8 * Copyright IBM, Corp. 2012
11 * Heinz Graalfs <graalfs@de.ibm.com>
13 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
14 * option) any later version. See the COPYING file in the top-level directory.
18 #include "qemu/osdep.h"
19 #include "qapi/error.h"
20 #include "qemu/module.h"
22 #include "hw/s390x/sclp.h"
23 #include "migration/vmstate.h"
24 #include "hw/s390x/event-facility.h"
26 typedef struct SCLPEventsBus
{
30 /* we need to save 32 bit chunks for compatibility */
31 #ifdef HOST_WORDS_BIGENDIAN
32 #define RECV_MASK_LOWER 1
33 #define RECV_MASK_UPPER 0
34 #else /* little endian host */
35 #define RECV_MASK_LOWER 0
36 #define RECV_MASK_UPPER 1
39 struct SCLPEventFacility
{
40 SysBusDevice parent_obj
;
42 /* guest's receive mask */
44 uint32_t receive_mask_pieces
[2];
45 sccb_mask_t receive_mask
;
48 * when false, we keep the same broken, backwards compatible behaviour as
49 * before, allowing only masks of size exactly 4; when true, we implement
50 * the architecture correctly, allowing all valid mask sizes. Needed for
51 * migration toward older versions.
53 bool allow_all_mask_sizes
;
54 /* length of the receive mask */
58 /* return true if any child has event pending set */
59 static bool event_pending(SCLPEventFacility
*ef
)
63 SCLPEventClass
*event_class
;
65 QTAILQ_FOREACH(kid
, &ef
->sbus
.qbus
.children
, sibling
) {
66 DeviceState
*qdev
= kid
->child
;
67 event
= DO_UPCAST(SCLPEvent
, qdev
, qdev
);
68 event_class
= SCLP_EVENT_GET_CLASS(event
);
69 if (event
->event_pending
&&
70 event_class
->get_send_mask() & ef
->receive_mask
) {
77 static sccb_mask_t
get_host_send_mask(SCLPEventFacility
*ef
)
81 SCLPEventClass
*child
;
85 QTAILQ_FOREACH(kid
, &ef
->sbus
.qbus
.children
, sibling
) {
86 DeviceState
*qdev
= kid
->child
;
87 child
= SCLP_EVENT_GET_CLASS((SCLPEvent
*) qdev
);
88 mask
|= child
->get_send_mask();
93 static sccb_mask_t
get_host_receive_mask(SCLPEventFacility
*ef
)
97 SCLPEventClass
*child
;
101 QTAILQ_FOREACH(kid
, &ef
->sbus
.qbus
.children
, sibling
) {
102 DeviceState
*qdev
= kid
->child
;
103 child
= SCLP_EVENT_GET_CLASS((SCLPEvent
*) qdev
);
104 mask
|= child
->get_receive_mask();
109 static uint16_t write_event_length_check(SCCB
*sccb
)
113 EventBufferHeader
*event
;
114 WriteEventData
*wed
= (WriteEventData
*) sccb
;
116 event
= (EventBufferHeader
*) &wed
->ebh
;
117 for (slen
= sccb_data_len(sccb
); slen
> 0; slen
-= elen
) {
118 elen
= be16_to_cpu(event
->length
);
119 if (elen
< sizeof(*event
) || elen
> slen
) {
120 return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR
;
122 event
= (void *) event
+ elen
;
125 return SCLP_RC_INCONSISTENT_LENGTHS
;
127 return SCLP_RC_NORMAL_COMPLETION
;
130 static uint16_t handle_write_event_buf(SCLPEventFacility
*ef
,
131 EventBufferHeader
*event_buf
, SCCB
*sccb
)
138 rc
= SCLP_RC_INVALID_FUNCTION
;
140 QTAILQ_FOREACH(kid
, &ef
->sbus
.qbus
.children
, sibling
) {
141 DeviceState
*qdev
= kid
->child
;
142 event
= (SCLPEvent
*) qdev
;
143 ec
= SCLP_EVENT_GET_CLASS(event
);
145 if (ec
->write_event_data
&&
146 ec
->can_handle_event(event_buf
->type
)) {
147 rc
= ec
->write_event_data(event
, event_buf
);
154 static uint16_t handle_sccb_write_events(SCLPEventFacility
*ef
, SCCB
*sccb
)
159 EventBufferHeader
*event_buf
;
160 WriteEventData
*wed
= (WriteEventData
*) sccb
;
162 event_buf
= &wed
->ebh
;
163 rc
= SCLP_RC_NORMAL_COMPLETION
;
165 /* loop over all contained event buffers */
166 for (slen
= sccb_data_len(sccb
); slen
> 0; slen
-= elen
) {
167 elen
= be16_to_cpu(event_buf
->length
);
169 /* in case of a previous error mark all trailing buffers
171 if (rc
!= SCLP_RC_NORMAL_COMPLETION
) {
172 event_buf
->flags
&= ~(SCLP_EVENT_BUFFER_ACCEPTED
);
174 rc
= handle_write_event_buf(ef
, event_buf
, sccb
);
176 event_buf
= (void *) event_buf
+ elen
;
181 static void write_event_data(SCLPEventFacility
*ef
, SCCB
*sccb
)
183 if (sccb
->h
.function_code
!= SCLP_FC_NORMAL_WRITE
) {
184 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_INVALID_FUNCTION
);
187 if (be16_to_cpu(sccb
->h
.length
) < 8) {
188 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH
);
191 /* first do a sanity check of the write events */
192 sccb
->h
.response_code
= cpu_to_be16(write_event_length_check(sccb
));
194 /* if no early error, then execute */
195 if (sccb
->h
.response_code
== be16_to_cpu(SCLP_RC_NORMAL_COMPLETION
)) {
196 sccb
->h
.response_code
=
197 cpu_to_be16(handle_sccb_write_events(ef
, sccb
));
204 static uint16_t handle_sccb_read_events(SCLPEventFacility
*ef
, SCCB
*sccb
,
213 EventBufferHeader
*event_buf
;
214 ReadEventData
*red
= (ReadEventData
*) sccb
;
216 event_buf
= &red
->ebh
;
217 event_buf
->length
= 0;
218 slen
= sizeof(sccb
->data
);
220 rc
= SCLP_RC_NO_EVENT_BUFFERS_STORED
;
222 QTAILQ_FOREACH(kid
, &ef
->sbus
.qbus
.children
, sibling
) {
223 DeviceState
*qdev
= kid
->child
;
224 event
= (SCLPEvent
*) qdev
;
225 ec
= SCLP_EVENT_GET_CLASS(event
);
227 if (mask
& ec
->get_send_mask()) {
228 if (ec
->read_event_data(event
, event_buf
, &slen
)) {
229 elen
= be16_to_cpu(event_buf
->length
);
230 event_buf
= (EventBufferHeader
*) ((char *)event_buf
+ elen
);
231 rc
= SCLP_RC_NORMAL_COMPLETION
;
236 if (sccb
->h
.control_mask
[2] & SCLP_VARIABLE_LENGTH_RESPONSE
) {
237 /* architecture suggests to reset variable-length-response bit */
238 sccb
->h
.control_mask
[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE
;
239 /* with a new length value */
240 sccb
->h
.length
= cpu_to_be16(SCCB_SIZE
- slen
);
/*
 * Copy up to src_len bytes from src into dst and zero-fill the remainder,
 * so that all dst_len bytes of dst end up initialized.  Used to convert
 * SCLP event masks between the guest's mask length and our internal
 * representation.
 *
 * Fix: src is only ever read, so it is now const-qualified
 * (backward-compatible for all callers).
 */
static void copy_mask(uint8_t *dst, const uint8_t *src, uint16_t dst_len,
                      uint16_t src_len)
{
    int i;

    for (i = 0; i < dst_len; i++) {
        dst[i] = i < src_len ? src[i] : 0;
    }
}
256 static void read_event_data(SCLPEventFacility
*ef
, SCCB
*sccb
)
258 sccb_mask_t sclp_active_selection_mask
;
259 sccb_mask_t sclp_cp_receive_mask
;
261 ReadEventData
*red
= (ReadEventData
*) sccb
;
263 if (be16_to_cpu(sccb
->h
.length
) != SCCB_SIZE
) {
264 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH
);
268 sclp_cp_receive_mask
= ef
->receive_mask
;
270 /* get active selection mask */
271 switch (sccb
->h
.function_code
) {
272 case SCLP_UNCONDITIONAL_READ
:
273 sclp_active_selection_mask
= sclp_cp_receive_mask
;
275 case SCLP_SELECTIVE_READ
:
276 copy_mask((uint8_t *)&sclp_active_selection_mask
, (uint8_t *)&red
->mask
,
277 sizeof(sclp_active_selection_mask
), ef
->mask_length
);
278 sclp_active_selection_mask
= be64_to_cpu(sclp_active_selection_mask
);
279 if (!sclp_cp_receive_mask
||
280 (sclp_active_selection_mask
& ~sclp_cp_receive_mask
)) {
281 sccb
->h
.response_code
=
282 cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK
);
287 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_INVALID_FUNCTION
);
290 sccb
->h
.response_code
= cpu_to_be16(
291 handle_sccb_read_events(ef
, sccb
, sclp_active_selection_mask
));
297 static void write_event_mask(SCLPEventFacility
*ef
, SCCB
*sccb
)
299 WriteEventMask
*we_mask
= (WriteEventMask
*) sccb
;
300 uint16_t mask_length
= be16_to_cpu(we_mask
->mask_length
);
301 sccb_mask_t tmp_mask
;
303 if (!mask_length
|| (mask_length
> SCLP_EVENT_MASK_LEN_MAX
) ||
304 ((mask_length
!= 4) && !ef
->allow_all_mask_sizes
)) {
305 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH
);
310 * Note: We currently only support masks up to 8 byte length;
311 * the remainder is filled up with zeroes. Older Linux
312 * kernels use a 4 byte mask length, newer ones can use both
313 * 8 or 4 depending on what is available on the host.
316 /* keep track of the guest's capability masks */
317 copy_mask((uint8_t *)&tmp_mask
, WEM_CP_RECEIVE_MASK(we_mask
, mask_length
),
318 sizeof(tmp_mask
), mask_length
);
319 ef
->receive_mask
= be64_to_cpu(tmp_mask
);
321 /* return the SCLP's capability masks to the guest */
322 tmp_mask
= cpu_to_be64(get_host_receive_mask(ef
));
323 copy_mask(WEM_RECEIVE_MASK(we_mask
, mask_length
), (uint8_t *)&tmp_mask
,
324 mask_length
, sizeof(tmp_mask
));
325 tmp_mask
= cpu_to_be64(get_host_send_mask(ef
));
326 copy_mask(WEM_SEND_MASK(we_mask
, mask_length
), (uint8_t *)&tmp_mask
,
327 mask_length
, sizeof(tmp_mask
));
329 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_NORMAL_COMPLETION
);
330 ef
->mask_length
= mask_length
;
336 /* qemu object creation and initialization functions */
338 #define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus"
340 static void sclp_events_bus_realize(BusState
*bus
, Error
**errp
)
344 /* TODO: recursive realization has to be done in common code */
345 QTAILQ_FOREACH(kid
, &bus
->children
, sibling
) {
346 DeviceState
*dev
= kid
->child
;
348 object_property_set_bool(OBJECT(dev
), true, "realized", errp
);
355 static void sclp_events_bus_class_init(ObjectClass
*klass
, void *data
)
357 BusClass
*bc
= BUS_CLASS(klass
);
359 bc
->realize
= sclp_events_bus_realize
;
362 static const TypeInfo sclp_events_bus_info
= {
363 .name
= TYPE_SCLP_EVENTS_BUS
,
365 .class_init
= sclp_events_bus_class_init
,
368 static void command_handler(SCLPEventFacility
*ef
, SCCB
*sccb
, uint64_t code
)
370 switch (code
& SCLP_CMD_CODE_MASK
) {
371 case SCLP_CMD_READ_EVENT_DATA
:
372 read_event_data(ef
, sccb
);
374 case SCLP_CMD_WRITE_EVENT_DATA
:
375 write_event_data(ef
, sccb
);
377 case SCLP_CMD_WRITE_EVENT_MASK
:
378 write_event_mask(ef
, sccb
);
381 sccb
->h
.response_code
= cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND
);
386 static bool vmstate_event_facility_mask64_needed(void *opaque
)
388 SCLPEventFacility
*ef
= opaque
;
390 return (ef
->receive_mask
& 0xFFFFFFFF) != 0;
393 static bool vmstate_event_facility_mask_length_needed(void *opaque
)
395 SCLPEventFacility
*ef
= opaque
;
397 return ef
->allow_all_mask_sizes
;
400 static const VMStateDescription vmstate_event_facility_mask64
= {
401 .name
= "vmstate-event-facility/mask64",
403 .minimum_version_id
= 0,
404 .needed
= vmstate_event_facility_mask64_needed
,
405 .fields
= (VMStateField
[]) {
406 VMSTATE_UINT32(receive_mask_pieces
[RECV_MASK_LOWER
], SCLPEventFacility
),
407 VMSTATE_END_OF_LIST()
411 static const VMStateDescription vmstate_event_facility_mask_length
= {
412 .name
= "vmstate-event-facility/mask_length",
414 .minimum_version_id
= 0,
415 .needed
= vmstate_event_facility_mask_length_needed
,
416 .fields
= (VMStateField
[]) {
417 VMSTATE_UINT16(mask_length
, SCLPEventFacility
),
418 VMSTATE_END_OF_LIST()
422 static const VMStateDescription vmstate_event_facility
= {
423 .name
= "vmstate-event-facility",
425 .minimum_version_id
= 0,
426 .fields
= (VMStateField
[]) {
427 VMSTATE_UINT32(receive_mask_pieces
[RECV_MASK_UPPER
], SCLPEventFacility
),
428 VMSTATE_END_OF_LIST()
430 .subsections
= (const VMStateDescription
* []) {
431 &vmstate_event_facility_mask64
,
432 &vmstate_event_facility_mask_length
,
437 static void sclp_event_set_allow_all_mask_sizes(Object
*obj
, bool value
,
440 SCLPEventFacility
*ef
= (SCLPEventFacility
*)obj
;
442 ef
->allow_all_mask_sizes
= value
;
445 static bool sclp_event_get_allow_all_mask_sizes(Object
*obj
, Error
**e
)
447 SCLPEventFacility
*ef
= (SCLPEventFacility
*)obj
;
449 return ef
->allow_all_mask_sizes
;
452 static void init_event_facility(Object
*obj
)
454 SCLPEventFacility
*event_facility
= EVENT_FACILITY(obj
);
455 DeviceState
*sdev
= DEVICE(obj
);
458 event_facility
->mask_length
= 4;
459 event_facility
->allow_all_mask_sizes
= true;
460 object_property_add_bool(obj
, "allow_all_mask_sizes",
461 sclp_event_get_allow_all_mask_sizes
,
462 sclp_event_set_allow_all_mask_sizes
, NULL
);
463 /* Spawn a new bus for SCLP events */
464 qbus_create_inplace(&event_facility
->sbus
, sizeof(event_facility
->sbus
),
465 TYPE_SCLP_EVENTS_BUS
, sdev
, NULL
);
467 new = object_new(TYPE_SCLP_QUIESCE
);
468 object_property_add_child(obj
, TYPE_SCLP_QUIESCE
, new, NULL
);
470 qdev_set_parent_bus(DEVICE(new), BUS(&event_facility
->sbus
));
472 new = object_new(TYPE_SCLP_CPU_HOTPLUG
);
473 object_property_add_child(obj
, TYPE_SCLP_CPU_HOTPLUG
, new, NULL
);
475 qdev_set_parent_bus(DEVICE(new), BUS(&event_facility
->sbus
));
476 /* the facility will automatically realize the devices via the bus */
479 static void reset_event_facility(DeviceState
*dev
)
481 SCLPEventFacility
*sdev
= EVENT_FACILITY(dev
);
483 sdev
->receive_mask
= 0;
486 static void init_event_facility_class(ObjectClass
*klass
, void *data
)
488 SysBusDeviceClass
*sbdc
= SYS_BUS_DEVICE_CLASS(klass
);
489 DeviceClass
*dc
= DEVICE_CLASS(sbdc
);
490 SCLPEventFacilityClass
*k
= EVENT_FACILITY_CLASS(dc
);
492 dc
->reset
= reset_event_facility
;
493 dc
->vmsd
= &vmstate_event_facility
;
494 set_bit(DEVICE_CATEGORY_MISC
, dc
->categories
);
495 k
->command_handler
= command_handler
;
496 k
->event_pending
= event_pending
;
499 static const TypeInfo sclp_event_facility_info
= {
500 .name
= TYPE_SCLP_EVENT_FACILITY
,
501 .parent
= TYPE_SYS_BUS_DEVICE
,
502 .instance_init
= init_event_facility
,
503 .instance_size
= sizeof(SCLPEventFacility
),
504 .class_init
= init_event_facility_class
,
505 .class_size
= sizeof(SCLPEventFacilityClass
),
508 static void event_realize(DeviceState
*qdev
, Error
**errp
)
510 SCLPEvent
*event
= SCLP_EVENT(qdev
);
511 SCLPEventClass
*child
= SCLP_EVENT_GET_CLASS(event
);
514 int rc
= child
->init(event
);
516 error_setg(errp
, "SCLP event initialization failed.");
522 static void event_class_init(ObjectClass
*klass
, void *data
)
524 DeviceClass
*dc
= DEVICE_CLASS(klass
);
526 dc
->bus_type
= TYPE_SCLP_EVENTS_BUS
;
527 dc
->realize
= event_realize
;
530 static const TypeInfo sclp_event_type_info
= {
531 .name
= TYPE_SCLP_EVENT
,
532 .parent
= TYPE_DEVICE
,
533 .instance_size
= sizeof(SCLPEvent
),
534 .class_init
= event_class_init
,
535 .class_size
= sizeof(SCLPEventClass
),
539 static void register_types(void)
541 type_register_static(&sclp_events_bus_info
);
542 type_register_static(&sclp_event_facility_info
);
543 type_register_static(&sclp_event_type_info
);
546 type_init(register_types
)
548 BusState
*sclp_get_event_facility_bus(void)
553 busobj
= object_resolve_path_type("", TYPE_SCLP_EVENTS_BUS
, NULL
);
554 sbus
= OBJECT_CHECK(SCLPEventsBus
, busobj
, TYPE_SCLP_EVENTS_BUS
);