/*
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Christian Borntraeger <borntraeger@de.ibm.com>
 *  Heinz Graalfs <graalfs@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "exec/memory.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"
#include "hw/s390x/s390-pci-bus.h"

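/* Resolve the single SCLP device from the QOM tree; cached after first use */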
static inline SCLPDevice *get_sclp_device(void)
{
    static SCLPDevice *sclp;

    if (!sclp) {
        sclp = SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
    }
    return sclp;
}

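/* Fill one CPUEntry per configured CPU with its address and SCLP CPU features */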
static void prepare_cpu_entries(SCLPDevice *sclp, CPUEntry *entry, int count)
{
    uint8_t features[SCCB_CPU_FEATURE_LEN] = { 0 };
    int i;

    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CPU, features);
    for (i = 0; i < count; i++) {
        entry[i].address = i;
        entry[i].type = 0;
        memcpy(entry[i].features, features, sizeof(entry[i].features));
    }
}

/* Provide information about the configuration, CPUs and storage */
static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadInfo *read_info = (ReadInfo *) sccb;
    MachineState *machine = MACHINE(qdev_get_machine());
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    CPUState *cpu;
    int cpu_count = 0;
    int rnsize, rnmax;
    int slots = MIN(machine->ram_slots, s390_get_memslot_count(kvm_state));

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    /* CPU information */
    read_info->entries_cpu = cpu_to_be16(cpu_count);
    read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
    read_info->highest_cpu = cpu_to_be16(max_cpus);

    /* Configuration Characteristic (Extension) */
    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR,
                        read_info->conf_char);
    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT,
                        read_info->conf_char_ext);

    prepare_cpu_entries(sclp, read_info->entries, cpu_count);

    read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
                                        SCLP_HAS_PCI_RECONFIG);

    /* Memory Hotplug is only supported for the ccw machine type */
    if (mhd) {
        mhd->standby_subregion_size = MEM_SECTION_SIZE;
        /* Deduct the memory slot already used for core */
        if (slots > 0) {
            while ((mhd->standby_subregion_size * (slots - 1)
                    < mhd->standby_mem_size)) {
                mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
            }
        }
        /*
         * Initialize mapping of guest standby memory sections indicating which
         * are and are not online. Assume all standby memory begins offline.
         */
        if (mhd->standby_state_map == 0) {
            if (mhd->standby_mem_size % mhd->standby_subregion_size) {
                mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
                                             mhd->standby_subregion_size + 1) *
                                             (mhd->standby_subregion_size /
                                             MEM_SECTION_SIZE));
            } else {
                mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
                                                   MEM_SECTION_SIZE);
            }
        }
        mhd->padded_ram_size = ram_size + mhd->pad_size;
        mhd->rzm = 1 << mhd->increment_size;

        read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
    }

    rnsize = 1 << (sclp->increment_size - 20);
    if (rnsize <= 128) {
        read_info->rnsize = rnsize;
    } else {
        read_info->rnsize = 0;
        read_info->rnsize2 = cpu_to_be32(rnsize);
    }

    rnmax = machine->maxram_size >> sclp->increment_size;
    if (rnmax < 0x10000) {
        read_info->rnmax = cpu_to_be16(rnmax);
    } else {
        read_info->rnmax = cpu_to_be16(0);
        read_info->rnmax2 = cpu_to_be64(rnmax);
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

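/* Report the assigned storage increments of element 0 (the core memory) */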
static void read_storage_element0_info(SCLPDevice *sclp, SCCB *sccb)
{
    int i, assigned;
    int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((ram_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding core memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    assigned = ram_size >> mhd->increment_size;
    storage_info->assigned = cpu_to_be16(assigned);

    for (i = 0; i < assigned; i++) {
        storage_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

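/* Report the size, in increments, of the standby memory (storage element 1) */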
static void read_storage_element1_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding standby memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
                                         mhd->increment_size);
    storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
                                        mhd->increment_size);
    sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
}

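/* ATTACH STORAGE ELEMENT: list the sub-increment IDs of the standby memory */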
static void attach_storage_element(SCLPDevice *sclp, SCCB *sccb,
                                   uint16_t element)
{
    int i, assigned, subincrement_id;
    AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if (element != 1) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    assigned = mhd->standby_mem_size >> mhd->increment_size;
    attach_info->assigned = cpu_to_be16(assigned);
    subincrement_id = ((ram_size >> mhd->increment_size) << 16)
                      + SCLP_STARTING_SUBINCREMENT_ID;
    for (i = 0; i < assigned; i++) {
        attach_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

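/*
 * ASSIGN STORAGE: bring a standby storage increment online by mapping the
 * containing "standby.ram<N>" subregion into guest memory (if it is not
 * mapped yet) and marking the section online in standby_state_map.
 */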
static void assign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    uint64_t this_subregion_size;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t assign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    assign_addr = (assign_info->rn - 1) * mhd->rzm;

    if ((assign_addr % MEM_SECTION_SIZE == 0) &&
        (assign_addr >= mhd->padded_ram_size)) {
        /* Re-use existing memory region if found */
        mr = memory_region_find(sysmem, assign_addr, 1).mr;
        memory_region_unref(mr);
        if (!mr) {

            MemoryRegion *standby_ram = g_new(MemoryRegion, 1);

            /* offset to align to standby_subregion_size for allocation */
            ram_addr_t offset = assign_addr -
                                (assign_addr - mhd->padded_ram_size)
                                % mhd->standby_subregion_size;

            /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */
            char id[16];
            snprintf(id, 16, "standby.ram%d",
                     (int)((offset - mhd->padded_ram_size) /
                     mhd->standby_subregion_size) + 1);

            /* Allocate a subregion of the calculated standby_subregion_size */
            if (offset + mhd->standby_subregion_size >
                mhd->padded_ram_size + mhd->standby_mem_size) {
                this_subregion_size = mhd->padded_ram_size +
                                      mhd->standby_mem_size - offset;
            } else {
                this_subregion_size = mhd->standby_subregion_size;
            }

            memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
                                   &error_fatal);
            /* This is a hack to make memory hotunplug work again. Once we have
             * subdevices, we have to unparent them when unassigning memory,
             * instead of doing it via the ref count of the MemoryRegion. */
            object_ref(OBJECT(standby_ram));
            object_unparent(OBJECT(standby_ram));
            vmstate_register_ram_global(standby_ram);
            memory_region_add_subregion(sysmem, offset, standby_ram);
        }
        /* The specified subregion is no longer in standby */
        mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
                               / MEM_SECTION_SIZE] = 1;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

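/*
 * UNASSIGN STORAGE: mark the section as standby again and, once no section
 * of the containing subregion is online anymore, remove the subregion from
 * guest memory and drop its reference.
 */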
static void unassign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t unassign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    unassign_addr = (assign_info->rn - 1) * mhd->rzm;

    /* if the addr is a multiple of 256 MB */
    if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
        (unassign_addr >= mhd->padded_ram_size)) {
        mhd->standby_state_map[(unassign_addr -
                           mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;

        /* find the specified memory region and destroy it */
        mr = memory_region_find(sysmem, unassign_addr, 1).mr;
        memory_region_unref(mr);
        if (mr) {
            int i;
            int is_removable = 1;
            ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
                                     (unassign_addr - mhd->padded_ram_size)
                                     % mhd->standby_subregion_size);
            /* Mark all affected subregions as 'standby' once again */
            for (i = 0;
                 i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
                 i++) {

                if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
                    is_removable = 0;
                    break;
                }
            }
            if (is_removable) {
                memory_region_del_subregion(sysmem, mr);
                object_unref(OBJECT(mr));
            }
        }
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

/* Provide information about the CPU */
static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
    CPUState *cpu;
    int cpu_count = 0;

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    cpu_info->nr_configured = cpu_to_be16(cpu_count);
    cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
    cpu_info->nr_standby = cpu_to_be16(0);

    /* The standby offset is 16-byte for each CPU */
    cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
        + cpu_info->nr_configured*sizeof(CPUEntry));

    prepare_cpu_entries(sclp, cpu_info->entries, cpu_count);

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

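/* Dispatch an SCLP command; anything unhandled goes to the event facility */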
static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code)
{
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        sclp_c->read_SCP_info(sclp, sccb);
        break;
    case SCLP_CMDW_READ_CPU_INFO:
        sclp_c->read_cpu_info(sclp, sccb);
        break;
    case SCLP_READ_STORAGE_ELEMENT_INFO:
        if (code & 0xff00) {
            sclp_c->read_storage_element1_info(sclp, sccb);
        } else {
            sclp_c->read_storage_element0_info(sclp, sccb);
        }
        break;
    case SCLP_ATTACH_STORAGE_ELEMENT:
        sclp_c->attach_storage_element(sclp, sccb, (code & 0xff00) >> 8);
        break;
    case SCLP_ASSIGN_STORAGE:
        sclp_c->assign_storage(sclp, sccb);
        break;
    case SCLP_UNASSIGN_STORAGE:
        sclp_c->unassign_storage(sclp, sccb);
        break;
    case SCLP_CMDW_CONFIGURE_PCI:
        s390_pci_sclp_configure(sccb);
        break;
    case SCLP_CMDW_DECONFIGURE_PCI:
        s390_pci_sclp_deconfigure(sccb);
        break;
    default:
        efc->command_handler(ef, sccb, code);
        break;
    }
}

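/*
 * Handler for the SERVICE CALL instruction: check privilege and the SCCB
 * address, run the command on a private copy of the SCCB, copy the result
 * back to the guest and signal completion with a service interrupt.
 */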
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    int r = 0;
    SCCB work_sccb;

    hwaddr sccb_len = sizeof(SCCB);

    /* first some basic checks on program checks */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        r = -PGM_PRIVILEGED;
        goto out;
    }
    if (cpu_physical_memory_is_io(sccb)) {
        r = -PGM_ADDRESSING;
        goto out;
    }
    if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
        || (sccb & ~0x7ffffff8UL) != 0) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    /*
     * we want to work on a private copy of the sccb, to prevent guests
     * from playing dirty tricks by modifying the memory content after
     * the host has checked the values
     */
    cpu_physical_memory_read(sccb, &work_sccb, sccb_len);

    /* Valid sccb sizes */
    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
        be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    sclp_c->execute(sclp, (SCCB *)&work_sccb, code);

    cpu_physical_memory_write(sccb, &work_sccb,
                              be16_to_cpu(work_sccb.h.length));

    sclp_c->service_interrupt(sclp, sccb);

out:
    return r;
}

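/*
 * Raise a service-signal external interrupt for the given SCCB address,
 * with the event-pending indication ORed into the parameter.
 */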
static void service_interrupt(SCLPDevice *sclp, uint32_t sccb)
{
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    uint32_t param = sccb & ~3;

    /* Indicate whether an event is still pending */
    param |= efc->event_pending(ef) ? 1 : 0;

    if (!param) {
        /* No need to send an interrupt, there's nothing to be notified about */
        return;
    }
    s390_sclp_extint(param);
}

void sclp_service_interrupt(uint32_t sccb)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);

    sclp_c->service_interrupt(sclp, sccb);
}

/* qemu object creation and initialization functions */

void s390_sclp_init(void)
{
    Object *new = object_new(TYPE_SCLP);

    object_property_add_child(qdev_get_machine(), TYPE_SCLP, new,
                              NULL);
    object_unref(OBJECT(new));
    qdev_init_nofail(DEVICE(new));
}

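/* Realize the SCLP device: realize the event facility and set the memory limit */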
static void sclp_realize(DeviceState *dev, Error **errp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    SCLPDevice *sclp = SCLP(dev);
    Error *err = NULL;
    uint64_t hw_limit;
    int ret;

    object_property_set_bool(OBJECT(sclp->event_facility), true, "realized",
                             &err);
    if (err) {
        goto out;
    }
    /*
     * qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS. As long
     * as we can't find a fitting bus via the qom tree, we have to add the
     * event facility to the sysbus, so e.g. a sclp console can be created.
     */
    qdev_set_parent_bus(DEVICE(sclp->event_facility), sysbus_get_default());

    ret = s390_set_memory_limit(machine->maxram_size, &hw_limit);
    if (ret == -E2BIG) {
        error_setg(&err, "qemu: host supports a maximum of %" PRIu64 " GB",
                   hw_limit >> 30);
    } else if (ret) {
        error_setg(&err, "qemu: setting the guest size failed");
    }

out:
    error_propagate(errp, err);
}

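/*
 * Compute the storage increment size, align the core and standby memory
 * sizes to it and, when memory hotplug is in use, record the pad needed to
 * start the standby area on a MEM_SECTION_SIZE boundary.
 */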
static void sclp_memory_init(SCLPDevice *sclp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    ram_addr_t initial_mem = machine->ram_size;
    ram_addr_t max_mem = machine->maxram_size;
    ram_addr_t standby_mem = max_mem - initial_mem;
    ram_addr_t pad_mem = 0;
    int increment_size = 20;

    /* The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer.
     * The variable 'increment_size' is an exponent of 2 that can be
     * used to calculate the size (in bytes) of an increment. */
    while ((initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
    if (machine->ram_slots) {
        while ((standby_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
            increment_size++;
        }
    }
    sclp->increment_size = increment_size;

    /* The core and standby memory areas need to be aligned with
     * the increment size. In effect, this can cause the
     * user-specified memory size to be rounded down to align
     * with the nearest increment boundary. */
    initial_mem = initial_mem >> increment_size << increment_size;
    standby_mem = standby_mem >> increment_size << increment_size;

    /* If the size of ram is not on a MEM_SECTION_SIZE boundary,
       calculate the pad size necessary to force this boundary. */
    if (machine->ram_slots && standby_mem) {
        sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev();

        if (initial_mem % MEM_SECTION_SIZE) {
            pad_mem = MEM_SECTION_SIZE - initial_mem % MEM_SECTION_SIZE;
        }
        mhd->increment_size = increment_size;
        mhd->pad_size = pad_mem;
        mhd->standby_mem_size = standby_mem;
    }
    machine->ram_size = initial_mem;
    machine->maxram_size = initial_mem + pad_mem + standby_mem;
    /* let's propagate the changed ram size into the global variable. */
    ram_size = initial_mem;
}

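/* Instance init: create the event facility child and set up the memory layout */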
static void sclp_init(Object *obj)
{
    SCLPDevice *sclp = SCLP(obj);
    Object *new;

    new = object_new(TYPE_SCLP_EVENT_FACILITY);
    object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new, NULL);
    object_unref(new);
    sclp->event_facility = EVENT_FACILITY(new);

    sclp_memory_init(sclp);
}

static void sclp_class_init(ObjectClass *oc, void *data)
{
    SCLPDeviceClass *sc = SCLP_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->desc = "SCLP (Service-Call Logical Processor)";
    dc->realize = sclp_realize;
    dc->hotpluggable = false;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    sc->read_SCP_info = read_SCP_info;
    sc->read_storage_element0_info = read_storage_element0_info;
    sc->read_storage_element1_info = read_storage_element1_info;
    sc->attach_storage_element = attach_storage_element;
    sc->assign_storage = assign_storage;
    sc->unassign_storage = unassign_storage;
    sc->read_cpu_info = sclp_read_cpu_info;
    sc->execute = sclp_execute;
    sc->service_interrupt = service_interrupt;
}

static TypeInfo sclp_info = {
    .name = TYPE_SCLP,
    .parent = TYPE_DEVICE,
    .instance_init = sclp_init,
    .instance_size = sizeof(SCLPDevice),
    .class_init = sclp_class_init,
    .class_size = sizeof(SCLPDeviceClass),
};

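/* Create the memory hotplug device as a child of the machine and return it */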
sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
{
    DeviceState *dev;
    dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
    object_property_add_child(qdev_get_machine(),
                              TYPE_SCLP_MEMORY_HOTPLUG_DEV,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
{
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass,
                                               void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static TypeInfo sclp_memory_hotplug_dev_info = {
    .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(sclpMemoryHotplugDev),
    .class_init = sclp_memory_hotplug_dev_class_init,
};

static void register_types(void)
{
    type_register_static(&sclp_memory_hotplug_dev_info);
    type_register_static(&sclp_info);
}

type_init(register_types);