/* Source: qemu (ar7.git) — hw/s390x/event-facility.c */
/*
 * SCLP
 *    Event Facility
 *       handles SCLP event types
 *          - Signal Quiesce - system power down
 *          - ASCII Console Data - VT220 read and write
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Heinz Graalfs <graalfs@de.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 */
18 #include "qemu/osdep.h"
19 #include "qapi/error.h"
20 #include "sysemu/sysemu.h"
22 #include "hw/s390x/sclp.h"
23 #include "hw/s390x/event-facility.h"
/* Bus on which all SCLP event devices (quiesce, CPU hotplug, ...) sit. */
typedef struct SCLPEventsBus {
    BusState qbus;
} SCLPEventsBus;
29 struct SCLPEventFacility {
30 SysBusDevice parent_obj;
31 SCLPEventsBus sbus;
32 /* guest's receive mask */
33 sccb_mask_t receive_mask;
35 * when false, we keep the same broken, backwards compatible behaviour as
36 * before, allowing only masks of size exactly 4; when true, we implement
37 * the architecture correctly, allowing all valid mask sizes. Needed for
38 * migration toward older versions.
40 bool allow_all_mask_sizes;
41 /* length of the receive mask */
42 uint16_t mask_length;
45 /* return true if any child has event pending set */
46 static bool event_pending(SCLPEventFacility *ef)
48 BusChild *kid;
49 SCLPEvent *event;
50 SCLPEventClass *event_class;
52 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
53 DeviceState *qdev = kid->child;
54 event = DO_UPCAST(SCLPEvent, qdev, qdev);
55 event_class = SCLP_EVENT_GET_CLASS(event);
56 if (event->event_pending &&
57 event_class->get_send_mask() & ef->receive_mask) {
58 return true;
61 return false;
64 static sccb_mask_t get_host_send_mask(SCLPEventFacility *ef)
66 sccb_mask_t mask;
67 BusChild *kid;
68 SCLPEventClass *child;
70 mask = 0;
72 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
73 DeviceState *qdev = kid->child;
74 child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
75 mask |= child->get_send_mask();
77 return mask;
80 static sccb_mask_t get_host_receive_mask(SCLPEventFacility *ef)
82 sccb_mask_t mask;
83 BusChild *kid;
84 SCLPEventClass *child;
86 mask = 0;
88 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
89 DeviceState *qdev = kid->child;
90 child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
91 mask |= child->get_receive_mask();
93 return mask;
96 static uint16_t write_event_length_check(SCCB *sccb)
98 int slen;
99 unsigned elen = 0;
100 EventBufferHeader *event;
101 WriteEventData *wed = (WriteEventData *) sccb;
103 event = (EventBufferHeader *) &wed->ebh;
104 for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
105 elen = be16_to_cpu(event->length);
106 if (elen < sizeof(*event) || elen > slen) {
107 return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR;
109 event = (void *) event + elen;
111 if (slen) {
112 return SCLP_RC_INCONSISTENT_LENGTHS;
114 return SCLP_RC_NORMAL_COMPLETION;
117 static uint16_t handle_write_event_buf(SCLPEventFacility *ef,
118 EventBufferHeader *event_buf, SCCB *sccb)
120 uint16_t rc;
121 BusChild *kid;
122 SCLPEvent *event;
123 SCLPEventClass *ec;
125 rc = SCLP_RC_INVALID_FUNCTION;
127 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
128 DeviceState *qdev = kid->child;
129 event = (SCLPEvent *) qdev;
130 ec = SCLP_EVENT_GET_CLASS(event);
132 if (ec->write_event_data &&
133 ec->can_handle_event(event_buf->type)) {
134 rc = ec->write_event_data(event, event_buf);
135 break;
138 return rc;
141 static uint16_t handle_sccb_write_events(SCLPEventFacility *ef, SCCB *sccb)
143 uint16_t rc;
144 int slen;
145 unsigned elen = 0;
146 EventBufferHeader *event_buf;
147 WriteEventData *wed = (WriteEventData *) sccb;
149 event_buf = &wed->ebh;
150 rc = SCLP_RC_NORMAL_COMPLETION;
152 /* loop over all contained event buffers */
153 for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
154 elen = be16_to_cpu(event_buf->length);
156 /* in case of a previous error mark all trailing buffers
157 * as not accepted */
158 if (rc != SCLP_RC_NORMAL_COMPLETION) {
159 event_buf->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED);
160 } else {
161 rc = handle_write_event_buf(ef, event_buf, sccb);
163 event_buf = (void *) event_buf + elen;
165 return rc;
168 static void write_event_data(SCLPEventFacility *ef, SCCB *sccb)
170 if (sccb->h.function_code != SCLP_FC_NORMAL_WRITE) {
171 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
172 goto out;
174 if (be16_to_cpu(sccb->h.length) < 8) {
175 sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
176 goto out;
178 /* first do a sanity check of the write events */
179 sccb->h.response_code = cpu_to_be16(write_event_length_check(sccb));
181 /* if no early error, then execute */
182 if (sccb->h.response_code == be16_to_cpu(SCLP_RC_NORMAL_COMPLETION)) {
183 sccb->h.response_code =
184 cpu_to_be16(handle_sccb_write_events(ef, sccb));
187 out:
188 return;
191 static uint16_t handle_sccb_read_events(SCLPEventFacility *ef, SCCB *sccb,
192 sccb_mask_t mask)
194 uint16_t rc;
195 int slen;
196 unsigned elen;
197 BusChild *kid;
198 SCLPEvent *event;
199 SCLPEventClass *ec;
200 EventBufferHeader *event_buf;
201 ReadEventData *red = (ReadEventData *) sccb;
203 event_buf = &red->ebh;
204 event_buf->length = 0;
205 slen = sizeof(sccb->data);
207 rc = SCLP_RC_NO_EVENT_BUFFERS_STORED;
209 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
210 DeviceState *qdev = kid->child;
211 event = (SCLPEvent *) qdev;
212 ec = SCLP_EVENT_GET_CLASS(event);
214 if (mask & ec->get_send_mask()) {
215 if (ec->read_event_data(event, event_buf, &slen)) {
216 elen = be16_to_cpu(event_buf->length);
217 event_buf = (EventBufferHeader *) ((char *)event_buf + elen);
218 rc = SCLP_RC_NORMAL_COMPLETION;
223 if (sccb->h.control_mask[2] & SCLP_VARIABLE_LENGTH_RESPONSE) {
224 /* architecture suggests to reset variable-length-response bit */
225 sccb->h.control_mask[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE;
226 /* with a new length value */
227 sccb->h.length = cpu_to_be16(SCCB_SIZE - slen);
229 return rc;
/* copy up to src_len bytes and fill the rest of dst with zeroes */
static void copy_mask(uint8_t *dst, uint8_t *src, uint16_t dst_len,
                      uint16_t src_len)
{
    int i;

    for (i = 0; i < dst_len; i++) {
        dst[i] = i < src_len ? src[i] : 0;
    }
}
243 static void read_event_data(SCLPEventFacility *ef, SCCB *sccb)
245 sccb_mask_t sclp_active_selection_mask;
246 sccb_mask_t sclp_cp_receive_mask;
248 ReadEventData *red = (ReadEventData *) sccb;
250 if (be16_to_cpu(sccb->h.length) != SCCB_SIZE) {
251 sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
252 goto out;
255 sclp_cp_receive_mask = ef->receive_mask;
257 /* get active selection mask */
258 switch (sccb->h.function_code) {
259 case SCLP_UNCONDITIONAL_READ:
260 sclp_active_selection_mask = sclp_cp_receive_mask;
261 break;
262 case SCLP_SELECTIVE_READ:
263 copy_mask((uint8_t *)&sclp_active_selection_mask, (uint8_t *)&red->mask,
264 sizeof(sclp_active_selection_mask), ef->mask_length);
265 sclp_active_selection_mask = be32_to_cpu(sclp_active_selection_mask);
266 if (!sclp_cp_receive_mask ||
267 (sclp_active_selection_mask & ~sclp_cp_receive_mask)) {
268 sccb->h.response_code =
269 cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK);
270 goto out;
272 break;
273 default:
274 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
275 goto out;
277 sccb->h.response_code = cpu_to_be16(
278 handle_sccb_read_events(ef, sccb, sclp_active_selection_mask));
280 out:
281 return;
284 static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb)
286 WriteEventMask *we_mask = (WriteEventMask *) sccb;
287 uint16_t mask_length = be16_to_cpu(we_mask->mask_length);
288 sccb_mask_t tmp_mask;
290 if (!mask_length || (mask_length > SCLP_EVENT_MASK_LEN_MAX) ||
291 ((mask_length != 4) && !ef->allow_all_mask_sizes)) {
292 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH);
293 goto out;
297 * Note: We currently only support masks up to 4 byte length;
298 * the remainder is filled up with zeroes. Linux uses
299 * a 4 byte mask length.
302 /* keep track of the guest's capability masks */
303 copy_mask((uint8_t *)&tmp_mask, WEM_CP_RECEIVE_MASK(we_mask, mask_length),
304 sizeof(tmp_mask), mask_length);
305 ef->receive_mask = be32_to_cpu(tmp_mask);
307 /* return the SCLP's capability masks to the guest */
308 tmp_mask = cpu_to_be32(get_host_receive_mask(ef));
309 copy_mask(WEM_RECEIVE_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
310 mask_length, sizeof(tmp_mask));
311 tmp_mask = cpu_to_be32(get_host_send_mask(ef));
312 copy_mask(WEM_SEND_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
313 mask_length, sizeof(tmp_mask));
315 sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
316 ef->mask_length = mask_length;
318 out:
319 return;
322 /* qemu object creation and initialization functions */
324 #define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus"
326 static void sclp_events_bus_realize(BusState *bus, Error **errp)
328 BusChild *kid;
330 /* TODO: recursive realization has to be done in common code */
331 QTAILQ_FOREACH(kid, &bus->children, sibling) {
332 DeviceState *dev = kid->child;
334 object_property_set_bool(OBJECT(dev), true, "realized", errp);
335 if (*errp) {
336 return;
341 static void sclp_events_bus_class_init(ObjectClass *klass, void *data)
343 BusClass *bc = BUS_CLASS(klass);
345 bc->realize = sclp_events_bus_realize;
348 static const TypeInfo sclp_events_bus_info = {
349 .name = TYPE_SCLP_EVENTS_BUS,
350 .parent = TYPE_BUS,
351 .class_init = sclp_events_bus_class_init,
354 static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code)
356 switch (code & SCLP_CMD_CODE_MASK) {
357 case SCLP_CMD_READ_EVENT_DATA:
358 read_event_data(ef, sccb);
359 break;
360 case SCLP_CMD_WRITE_EVENT_DATA:
361 write_event_data(ef, sccb);
362 break;
363 case SCLP_CMD_WRITE_EVENT_MASK:
364 write_event_mask(ef, sccb);
365 break;
366 default:
367 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
368 break;
372 static bool vmstate_event_facility_mask_length_needed(void *opaque)
374 SCLPEventFacility *ef = opaque;
376 return ef->allow_all_mask_sizes;
379 static const VMStateDescription vmstate_event_facility_mask_length = {
380 .name = "vmstate-event-facility/mask_length",
381 .version_id = 0,
382 .minimum_version_id = 0,
383 .needed = vmstate_event_facility_mask_length_needed,
384 .fields = (VMStateField[]) {
385 VMSTATE_UINT16(mask_length, SCLPEventFacility),
386 VMSTATE_END_OF_LIST()
390 static const VMStateDescription vmstate_event_facility = {
391 .name = "vmstate-event-facility",
392 .version_id = 0,
393 .minimum_version_id = 0,
394 .fields = (VMStateField[]) {
395 VMSTATE_UINT32(receive_mask, SCLPEventFacility),
396 VMSTATE_END_OF_LIST()
398 .subsections = (const VMStateDescription * []) {
399 &vmstate_event_facility_mask_length,
400 NULL
404 static void sclp_event_set_allow_all_mask_sizes(Object *obj, bool value,
405 Error **errp)
407 SCLPEventFacility *ef = (SCLPEventFacility *)obj;
409 ef->allow_all_mask_sizes = value;
412 static bool sclp_event_get_allow_all_mask_sizes(Object *obj, Error **e)
414 SCLPEventFacility *ef = (SCLPEventFacility *)obj;
416 return ef->allow_all_mask_sizes;
419 static void init_event_facility(Object *obj)
421 SCLPEventFacility *event_facility = EVENT_FACILITY(obj);
422 DeviceState *sdev = DEVICE(obj);
423 Object *new;
425 event_facility->mask_length = 4;
426 event_facility->allow_all_mask_sizes = true;
427 object_property_add_bool(obj, "allow_all_mask_sizes",
428 sclp_event_get_allow_all_mask_sizes,
429 sclp_event_set_allow_all_mask_sizes, NULL);
430 /* Spawn a new bus for SCLP events */
431 qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus),
432 TYPE_SCLP_EVENTS_BUS, sdev, NULL);
434 new = object_new(TYPE_SCLP_QUIESCE);
435 object_property_add_child(obj, TYPE_SCLP_QUIESCE, new, NULL);
436 object_unref(new);
437 qdev_set_parent_bus(DEVICE(new), &event_facility->sbus.qbus);
439 new = object_new(TYPE_SCLP_CPU_HOTPLUG);
440 object_property_add_child(obj, TYPE_SCLP_CPU_HOTPLUG, new, NULL);
441 object_unref(new);
442 qdev_set_parent_bus(DEVICE(new), &event_facility->sbus.qbus);
443 /* the facility will automatically realize the devices via the bus */
446 static void reset_event_facility(DeviceState *dev)
448 SCLPEventFacility *sdev = EVENT_FACILITY(dev);
450 sdev->receive_mask = 0;
453 static void init_event_facility_class(ObjectClass *klass, void *data)
455 SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass);
456 DeviceClass *dc = DEVICE_CLASS(sbdc);
457 SCLPEventFacilityClass *k = EVENT_FACILITY_CLASS(dc);
459 dc->reset = reset_event_facility;
460 dc->vmsd = &vmstate_event_facility;
461 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
462 k->command_handler = command_handler;
463 k->event_pending = event_pending;
466 static const TypeInfo sclp_event_facility_info = {
467 .name = TYPE_SCLP_EVENT_FACILITY,
468 .parent = TYPE_SYS_BUS_DEVICE,
469 .instance_init = init_event_facility,
470 .instance_size = sizeof(SCLPEventFacility),
471 .class_init = init_event_facility_class,
472 .class_size = sizeof(SCLPEventFacilityClass),
475 static void event_realize(DeviceState *qdev, Error **errp)
477 SCLPEvent *event = SCLP_EVENT(qdev);
478 SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event);
480 if (child->init) {
481 int rc = child->init(event);
482 if (rc < 0) {
483 error_setg(errp, "SCLP event initialization failed.");
484 return;
489 static void event_class_init(ObjectClass *klass, void *data)
491 DeviceClass *dc = DEVICE_CLASS(klass);
493 dc->bus_type = TYPE_SCLP_EVENTS_BUS;
494 dc->realize = event_realize;
497 static const TypeInfo sclp_event_type_info = {
498 .name = TYPE_SCLP_EVENT,
499 .parent = TYPE_DEVICE,
500 .instance_size = sizeof(SCLPEvent),
501 .class_init = event_class_init,
502 .class_size = sizeof(SCLPEventClass),
503 .abstract = true,
506 static void register_types(void)
508 type_register_static(&sclp_events_bus_info);
509 type_register_static(&sclp_event_facility_info);
510 type_register_static(&sclp_event_type_info);
513 type_init(register_types)