/*
 * SCLP Support
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Christian Borntraeger <borntraeger@de.ibm.com>
 *  Heinz Graalfs <graalfs@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#include "exec/memory.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"
#include "hw/s390x/s390-pci-bus.h"

static inline SCLPDevice *get_sclp_device(void)
{
    return SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
}

/* Provide information about the configuration, CPUs and storage */
static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadInfo *read_info = (ReadInfo *) sccb;
    MachineState *machine = MACHINE(qdev_get_machine());
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    CPUState *cpu;
    int cpu_count = 0;
    int i = 0;
    int rnsize, rnmax;
    int slots = MIN(machine->ram_slots, s390_get_memslot_count(kvm_state));

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    /* CPU information */
    read_info->entries_cpu = cpu_to_be16(cpu_count);
    read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
    read_info->highest_cpu = cpu_to_be16(max_cpus);

    for (i = 0; i < cpu_count; i++) {
        read_info->entries[i].address = i;
        read_info->entries[i].type = 0;
    }

    read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
                                        SCLP_HAS_PCI_RECONFIG);

    /* Memory Hotplug is only supported for the ccw machine type */
    if (mhd) {
        mhd->standby_subregion_size = MEM_SECTION_SIZE;
        /* Deduct the memory slot already used for core */
        if (slots > 0) {
            while ((mhd->standby_subregion_size * (slots - 1)
                    < mhd->standby_mem_size)) {
                mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
            }
        }
        /*
         * Initialize mapping of guest standby memory sections indicating which
         * are and are not online. Assume all standby memory begins offline.
         */
        if (mhd->standby_state_map == 0) {
            if (mhd->standby_mem_size % mhd->standby_subregion_size) {
                mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
                                             mhd->standby_subregion_size + 1) *
                                             (mhd->standby_subregion_size /
                                             MEM_SECTION_SIZE));
            } else {
                mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
                                                   MEM_SECTION_SIZE);
            }
        }
        mhd->padded_ram_size = ram_size + mhd->pad_size;
        mhd->rzm = 1 << mhd->increment_size;

        read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
    }

    rnsize = 1 << (sclp->increment_size - 20);
    if (rnsize <= 128) {
        read_info->rnsize = rnsize;
    } else {
        read_info->rnsize = 0;
        read_info->rnsize2 = cpu_to_be32(rnsize);
    }

    rnmax = machine->maxram_size >> sclp->increment_size;
    if (rnmax < 0x10000) {
        read_info->rnmax = cpu_to_be16(rnmax);
    } else {
        read_info->rnmax = cpu_to_be16(0);
        read_info->rnmax2 = cpu_to_be64(rnmax);
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

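/* Provide information about storage element 0, i.e. the core memory that is
 * assigned to the guest at boot time */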
static void read_storage_element0_info(SCLPDevice *sclp, SCCB *sccb)
{
    int i, assigned;
    int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((ram_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding core memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    assigned = ram_size >> mhd->increment_size;
    storage_info->assigned = cpu_to_be16(assigned);

    for (i = 0; i < assigned; i++) {
        storage_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

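/* Provide information about storage element 1, i.e. the standby memory that
 * may be assigned to the guest at run time */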
static void read_storage_element1_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding standby memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
                                         mhd->increment_size);
    storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
                                        mhd->increment_size);
    sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
}

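/* Report the subincrement ids of the standby storage element; only element 1
 * (standby memory) is valid */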
static void attach_storage_element(SCLPDevice *sclp, SCCB *sccb,
                                   uint16_t element)
{
    int i, assigned, subincrement_id;
    AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if (element != 1) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    assigned = mhd->standby_mem_size >> mhd->increment_size;
    attach_info->assigned = cpu_to_be16(assigned);
    subincrement_id = ((ram_size >> mhd->increment_size) << 16)
                      + SCLP_STARTING_SUBINCREMENT_ID;
    for (i = 0; i < assigned; i++) {
        attach_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

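/* Bring a standby storage increment online: create (or reuse) the backing
 * RAM region and mark the corresponding section as assigned */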
static void assign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    uint64_t this_subregion_size;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t assign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    assign_addr = (assign_info->rn - 1) * mhd->rzm;

    if ((assign_addr % MEM_SECTION_SIZE == 0) &&
        (assign_addr >= mhd->padded_ram_size)) {
        /* Re-use existing memory region if found */
        mr = memory_region_find(sysmem, assign_addr, 1).mr;
        memory_region_unref(mr);
        if (!mr) {

            MemoryRegion *standby_ram = g_new(MemoryRegion, 1);

            /* offset to align to standby_subregion_size for allocation */
            ram_addr_t offset = assign_addr -
                                (assign_addr - mhd->padded_ram_size)
                                % mhd->standby_subregion_size;

            /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */
            char id[16];
            snprintf(id, 16, "standby.ram%d",
                     (int)((offset - mhd->padded_ram_size) /
                     mhd->standby_subregion_size) + 1);

            /* Allocate a subregion of the calculated standby_subregion_size */
            if (offset + mhd->standby_subregion_size >
                mhd->padded_ram_size + mhd->standby_mem_size) {
                this_subregion_size = mhd->padded_ram_size +
                                      mhd->standby_mem_size - offset;
            } else {
                this_subregion_size = mhd->standby_subregion_size;
            }

            memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
                                   &error_abort);
            /* This is a hack to make memory hotunplug work again. Once we have
             * subdevices, we have to unparent them when unassigning memory,
             * instead of doing it via the ref count of the MemoryRegion. */
            object_ref(OBJECT(standby_ram));
            object_unparent(OBJECT(standby_ram));
            vmstate_register_ram_global(standby_ram);
            memory_region_add_subregion(sysmem, offset, standby_ram);
        }
        /* The specified subregion is no longer in standby */
        mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
                               / MEM_SECTION_SIZE] = 1;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

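/* Return a storage increment to standby: mark its section as offline and
 * drop the backing RAM region once no section of the subregion is online */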
static void unassign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t unassign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    unassign_addr = (assign_info->rn - 1) * mhd->rzm;

    /* if the addr is a multiple of 256 MB */
    if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
        (unassign_addr >= mhd->padded_ram_size)) {
        mhd->standby_state_map[(unassign_addr -
                                mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;

        /* find the specified memory region and destroy it */
        mr = memory_region_find(sysmem, unassign_addr, 1).mr;
        memory_region_unref(mr);
        if (mr) {
            int i;
            int is_removable = 1;
            ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
                                     (unassign_addr - mhd->padded_ram_size)
                                     % mhd->standby_subregion_size);
            /* Mark all affected subregions as 'standby' once again */
            for (i = 0;
                 i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
                 i++) {

                if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
                    is_removable = 0;
                    break;
                }
            }
            if (is_removable) {
                memory_region_del_subregion(sysmem, mr);
                object_unref(OBJECT(mr));
            }
        }
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

/* Provide information about the CPU */
static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
    CPUState *cpu;
    int cpu_count = 0;
    int i = 0;

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    cpu_info->nr_configured = cpu_to_be16(cpu_count);
    cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
    cpu_info->nr_standby = cpu_to_be16(0);

    /* The standby offset is 16-byte for each CPU */
    cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
        + cpu_info->nr_configured * sizeof(CPUEntry));

    for (i = 0; i < cpu_count; i++) {
        cpu_info->entries[i].address = i;
        cpu_info->entries[i].type = 0;
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

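/* Dispatch an SCLP command to its handler; commands not handled here are
 * forwarded to the event facility */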
static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code)
{
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        sclp_c->read_SCP_info(sclp, sccb);
        break;
    case SCLP_CMDW_READ_CPU_INFO:
        sclp_c->read_cpu_info(sclp, sccb);
        break;
    case SCLP_READ_STORAGE_ELEMENT_INFO:
        if (code & 0xff00) {
            sclp_c->read_storage_element1_info(sclp, sccb);
        } else {
            sclp_c->read_storage_element0_info(sclp, sccb);
        }
        break;
    case SCLP_ATTACH_STORAGE_ELEMENT:
        sclp_c->attach_storage_element(sclp, sccb, (code & 0xff00) >> 8);
        break;
    case SCLP_ASSIGN_STORAGE:
        sclp_c->assign_storage(sclp, sccb);
        break;
    case SCLP_UNASSIGN_STORAGE:
        sclp_c->unassign_storage(sclp, sccb);
        break;
    case SCLP_CMDW_CONFIGURE_PCI:
        s390_pci_sclp_configure(1, sccb);
        break;
    case SCLP_CMDW_DECONFIGURE_PCI:
        s390_pci_sclp_configure(0, sccb);
        break;
    default:
        efc->command_handler(ef, sccb, code);
        break;
    }
}

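/* Entry point for the SERVICE CALL instruction: validate the SCCB address and
 * length, execute the command on a private copy of the SCCB, copy the result
 * back to guest memory and raise the service interrupt.
 * Returns 0 or a negative program check code. */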
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    int r = 0;
    SCCB work_sccb;

    hwaddr sccb_len = sizeof(SCCB);

    /* first some basic checks on program checks */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        r = -PGM_PRIVILEGED;
        goto out;
    }
    if (cpu_physical_memory_is_io(sccb)) {
        r = -PGM_ADDRESSING;
        goto out;
    }
    if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
        || (sccb & ~0x7ffffff8UL) != 0) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    /*
     * we want to work on a private copy of the sccb, to prevent guests
     * from playing dirty tricks by modifying the memory content after
     * the host has checked the values
     */
    cpu_physical_memory_read(sccb, &work_sccb, sccb_len);

    /* Valid sccb sizes */
    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
        be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    sclp_c->execute(sclp, (SCCB *)&work_sccb, code);

    cpu_physical_memory_write(sccb, &work_sccb,
                              be16_to_cpu(work_sccb.h.length));

    sclp_c->service_interrupt(sclp, sccb);

out:
    return r;
}

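/* Raise the service-signal external interrupt, encoding the SCCB address and
 * a pending-event indication in the interrupt parameter */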
static void service_interrupt(SCLPDevice *sclp, uint32_t sccb)
{
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    uint32_t param = sccb & ~3;

    /* Indicate whether an event is still pending */
    param |= efc->event_pending(ef) ? 1 : 0;

    if (!param) {
        /* No need to send an interrupt, there's nothing to be notified about */
        return;
    }
    s390_sclp_extint(param);
}

void sclp_service_interrupt(uint32_t sccb)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);

    sclp_c->service_interrupt(sclp, sccb);
}

/* qemu object creation and initialization functions */
void s390_sclp_init(void)
{
    Object *new = object_new(TYPE_SCLP);

    object_property_add_child(qdev_get_machine(), TYPE_SCLP, new,
                              NULL);
    object_unref(OBJECT(new));
    qdev_init_nofail(DEVICE(new));
}

static void sclp_realize(DeviceState *dev, Error **errp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    SCLPDevice *sclp = SCLP(dev);
    Error *l_err = NULL;
    uint64_t hw_limit;
    int ret;

    object_property_set_bool(OBJECT(sclp->event_facility), true, "realized",
                             &l_err);
    if (l_err) {
        goto error;
    }

    ret = s390_set_memory_limit(machine->maxram_size, &hw_limit);
    if (ret == -E2BIG) {
        error_setg(&l_err, "qemu: host supports a maximum of %" PRIu64 " GB",
                   hw_limit >> 30);
        goto error;
    } else if (ret) {
        error_setg(&l_err, "qemu: setting the guest size failed");
        goto error;
    }
    return;
error:
    assert(l_err);
    error_propagate(errp, l_err);
}

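/* Determine the storage increment size and align the core, padding and
 * standby memory areas accordingly */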
static void sclp_memory_init(SCLPDevice *sclp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    ram_addr_t initial_mem = machine->ram_size;
    ram_addr_t max_mem = machine->maxram_size;
    ram_addr_t standby_mem = max_mem - initial_mem;
    ram_addr_t pad_mem = 0;
    int increment_size = 20;

    /* The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or
     * fewer. The variable 'increment_size' is an exponent of 2 that can be
     * used to calculate the size (in bytes) of an increment. */
    while ((initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
    if (machine->ram_slots) {
        while ((standby_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
            increment_size++;
        }
    }
    sclp->increment_size = increment_size;

    /* The core and standby memory areas need to be aligned with
     * the increment size. In effect, this can cause the
     * user-specified memory size to be rounded down to align
     * with the nearest increment boundary. */
    initial_mem = initial_mem >> increment_size << increment_size;
    standby_mem = standby_mem >> increment_size << increment_size;

    /* If the size of ram is not on a MEM_SECTION_SIZE boundary,
       calculate the pad size necessary to force this boundary. */
    if (machine->ram_slots && standby_mem) {
        sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev();

        if (initial_mem % MEM_SECTION_SIZE) {
            pad_mem = MEM_SECTION_SIZE - initial_mem % MEM_SECTION_SIZE;
        }
        mhd->increment_size = increment_size;
        mhd->pad_size = pad_mem;
        mhd->standby_mem_size = standby_mem;
    }
    machine->ram_size = initial_mem;
    machine->maxram_size = initial_mem + pad_mem + standby_mem;
    /* let's propagate the changed ram size into the global variable. */
    ram_size = initial_mem;
}

static void sclp_init(Object *obj)
{
    SCLPDevice *sclp = SCLP(obj);
    Object *new;

    new = object_new(TYPE_SCLP_EVENT_FACILITY);
    object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new, NULL);
    /* qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS */
    qdev_set_parent_bus(DEVICE(new), sysbus_get_default());
    object_unref(new);
    sclp->event_facility = EVENT_FACILITY(new);

    sclp_memory_init(sclp);
}

static void sclp_class_init(ObjectClass *oc, void *data)
{
    SCLPDeviceClass *sc = SCLP_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->desc = "SCLP (Service-Call Logical Processor)";
    dc->realize = sclp_realize;
    dc->hotpluggable = false;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    sc->read_SCP_info = read_SCP_info;
    sc->read_storage_element0_info = read_storage_element0_info;
    sc->read_storage_element1_info = read_storage_element1_info;
    sc->attach_storage_element = attach_storage_element;
    sc->assign_storage = assign_storage;
    sc->unassign_storage = unassign_storage;
    sc->read_cpu_info = sclp_read_cpu_info;
    sc->execute = sclp_execute;
    sc->service_interrupt = service_interrupt;
}

static TypeInfo sclp_info = {
    .name = TYPE_SCLP,
    .parent = TYPE_DEVICE,
    .instance_init = sclp_init,
    .instance_size = sizeof(SCLPDevice),
    .class_init = sclp_class_init,
    .class_size = sizeof(SCLPDeviceClass),
};

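/* Create the memory hotplug device and attach it to the machine object */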
sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
{
    DeviceState *dev;
    dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
    object_property_add_child(qdev_get_machine(),
                              TYPE_SCLP_MEMORY_HOTPLUG_DEV,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
{
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass,
                                               void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static TypeInfo sclp_memory_hotplug_dev_info = {
    .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(sclpMemoryHotplugDev),
    .class_init = sclp_memory_hotplug_dev_class_init,
};

static void register_types(void)
{
    type_register_static(&sclp_memory_hotplug_dev_info);
    type_register_static(&sclp_info);
}

type_init(register_types);