 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Christian Borntraeger <borntraeger@de.ibm.com>
 *  Heinz Graalfs <graalfs@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/kvm.h"
#include "exec/memory.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"
#include "hw/s390x/s390-pci-bus.h"

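/*
 * Resolve the single SCLP device via the QOM tree once and cache the
 * result, so later service calls do not repeat the path lookup.
 */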
static inline SCLPDevice *get_sclp_device(void)
{
    static SCLPDevice *sclp;

    if (!sclp) {
        sclp = SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
    }
    return sclp;
}

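/* Fill one CPUEntry per guest CPU with the SCLP CPU feature block. */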
static void prepare_cpu_entries(SCLPDevice *sclp, CPUEntry *entry, int count)
{
    uint8_t features[SCCB_CPU_FEATURE_LEN] = { 0 };
    int i;

    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CPU, features);
    for (i = 0; i < count; i++) {
        memcpy(entry[i].features, features, sizeof(entry[i].features));
    }
}

/* Provide information about the configuration, CPUs and storage */
static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadInfo *read_info = (ReadInfo *) sccb;
    MachineState *machine = MACHINE(qdev_get_machine());
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    CPUState *cpu;
    int cpu_count = 0;
    int rnsize, rnmax;
    int slots = MIN(machine->ram_slots, s390_get_memslot_count(kvm_state));

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    read_info->entries_cpu = cpu_to_be16(cpu_count);
    read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
    read_info->highest_cpu = cpu_to_be16(max_cpus);

    read_info->ibc_val = cpu_to_be32(s390_get_ibc_val());

    /* Configuration Characteristic (Extension) */
    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR,
                        read_info->conf_char);
    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT,
                        read_info->conf_char_ext);

    prepare_cpu_entries(sclp, read_info->entries, cpu_count);

    read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
                                        SCLP_HAS_PCI_RECONFIG);

    /* Memory Hotplug is only supported for the ccw machine type */
    if (mhd) {
        mhd->standby_subregion_size = MEM_SECTION_SIZE;
        /* Deduct the memory slot already used for core */
        if (slots > 0) {
            while ((mhd->standby_subregion_size * (slots - 1)
                    < mhd->standby_mem_size)) {
                mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
            }
        }
        /*
         * Initialize mapping of guest standby memory sections indicating which
         * are and are not online. Assume all standby memory begins offline.
         */
        if (mhd->standby_state_map == 0) {
            if (mhd->standby_mem_size % mhd->standby_subregion_size) {
                mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
                                             mhd->standby_subregion_size + 1) *
                                             (mhd->standby_subregion_size /
                                              MEM_SECTION_SIZE));
            } else {
                mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
                                                   MEM_SECTION_SIZE);
            }
        }
        mhd->padded_ram_size = ram_size + mhd->pad_size;
        mhd->rzm = 1 << mhd->increment_size;

        read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
    }
    read_info->mha_pow = s390_get_mha_pow();
    read_info->hmfai = cpu_to_be32(s390_get_hmfai());

    rnsize = 1 << (sclp->increment_size - 20);
    if (rnsize <= 128) {
        read_info->rnsize = rnsize;
    } else {
        read_info->rnsize = 0;
        read_info->rnsize2 = cpu_to_be32(rnsize);
    }

    rnmax = machine->maxram_size >> sclp->increment_size;
    if (rnmax < 0x10000) {
        read_info->rnmax = cpu_to_be16(rnmax);
    } else {
        read_info->rnmax = cpu_to_be16(0);
        read_info->rnmax2 = cpu_to_be64(rnmax);
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

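/*
 * Report storage element 0, i.e. the core memory already assigned to the
 * guest, as one subincrement ID per storage increment.
 */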
static void read_storage_element0_info(SCLPDevice *sclp, SCCB *sccb)
{
    int i, assigned;
    int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((ram_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding core memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    assigned = ram_size >> mhd->increment_size;
    storage_info->assigned = cpu_to_be16(assigned);

    for (i = 0; i < assigned; i++) {
        storage_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

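/*
 * Report storage element 1, i.e. the standby memory pool; the assigned and
 * standby increment counts both cover the whole standby area.
 */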
static void read_storage_element1_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding standby memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
                                         mhd->increment_size);
    storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
                                        mhd->increment_size);
    sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
}

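/*
 * Attach a storage element: report the subincrement IDs of the standby
 * increments that the guest may subsequently assign. The element number is
 * taken from bits 8-15 of the SCLP command code (see sclp_execute below).
 */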
static void attach_storage_element(SCLPDevice *sclp, SCCB *sccb,
                                   uint16_t element)
{
    int i, assigned, subincrement_id;
    AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if (element != 1) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    assigned = mhd->standby_mem_size >> mhd->increment_size;
    attach_info->assigned = cpu_to_be16(assigned);
    subincrement_id = ((ram_size >> mhd->increment_size) << 16)
                      + SCLP_STARTING_SUBINCREMENT_ID;
    for (i = 0; i < assigned; i++) {
        attach_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

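/*
 * Assign (bring online) one standby storage increment. The first assignment
 * that touches a standby subregion allocates and maps backing RAM for the
 * whole subregion; the state map records which sections are online.
 */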
static void assign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    uint64_t this_subregion_size;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t assign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    assign_addr = (assign_info->rn - 1) * mhd->rzm;

    if ((assign_addr % MEM_SECTION_SIZE == 0) &&
        (assign_addr >= mhd->padded_ram_size)) {
        /* Re-use existing memory region if found */
        mr = memory_region_find(sysmem, assign_addr, 1).mr;
        memory_region_unref(mr);
        if (!mr) {
            MemoryRegion *standby_ram = g_new(MemoryRegion, 1);

            /* offset to align to standby_subregion_size for allocation */
            ram_addr_t offset = assign_addr -
                                (assign_addr - mhd->padded_ram_size)
                                % mhd->standby_subregion_size;

            /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */
            char id[16];
            snprintf(id, 16, "standby.ram%d",
                     (int)((offset - mhd->padded_ram_size) /
                     mhd->standby_subregion_size) + 1);

            /* Allocate a subregion of the calculated standby_subregion_size */
            if (offset + mhd->standby_subregion_size >
                mhd->padded_ram_size + mhd->standby_mem_size) {
                this_subregion_size = mhd->padded_ram_size +
                                      mhd->standby_mem_size - offset;
            } else {
                this_subregion_size = mhd->standby_subregion_size;
            }

            memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
                                   &error_fatal);
            /* This is a hack to make memory hotunplug work again. Once we have
             * subdevices, we have to unparent them when unassigning memory,
             * instead of doing it via the ref count of the MemoryRegion. */
            object_ref(OBJECT(standby_ram));
            object_unparent(OBJECT(standby_ram));
            vmstate_register_ram_global(standby_ram);
            memory_region_add_subregion(sysmem, offset, standby_ram);
        }
        /* The specified subregion is no longer in standby */
        mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
                               / MEM_SECTION_SIZE] = 1;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

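/*
 * Unassign (take offline) one storage increment. The backing subregion is
 * only removed once none of its sections are marked online in the state map.
 */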
static void unassign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t unassign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    unassign_addr = (assign_info->rn - 1) * mhd->rzm;

    /* if the addr is a multiple of 256 MB */
    if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
        (unassign_addr >= mhd->padded_ram_size)) {
        mhd->standby_state_map[(unassign_addr -
                                mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;

        /* find the specified memory region and destroy it */
        mr = memory_region_find(sysmem, unassign_addr, 1).mr;
        memory_region_unref(mr);
        if (mr) {
            int i;
            int is_removable = 1;
            ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
                                     (unassign_addr - mhd->padded_ram_size)
                                     % mhd->standby_subregion_size);
            /* Mark all affected subregions as 'standby' once again */
            for (i = 0;
                 i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
                 i++) {
                if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
                    is_removable = 0;
                    break;
                }
            }
            if (is_removable) {
                memory_region_del_subregion(sysmem, mr);
                object_unref(OBJECT(mr));
            }
        }
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

/* Provide information about the CPU */
static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
    CPUState *cpu;
    int cpu_count = 0;

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    cpu_info->nr_configured = cpu_to_be16(cpu_count);
    cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
    cpu_info->nr_standby = cpu_to_be16(0);

    /* The standby offset is 16-byte for each CPU */
    cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
        + cpu_info->nr_configured*sizeof(CPUEntry));

    prepare_cpu_entries(sclp, cpu_info->entries, cpu_count);

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

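/* Dispatch an SCLP command to the matching handler of the SCLP device class. */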
static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code)
{
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        sclp_c->read_SCP_info(sclp, sccb);
        break;
    case SCLP_CMDW_READ_CPU_INFO:
        sclp_c->read_cpu_info(sclp, sccb);
        break;
    case SCLP_READ_STORAGE_ELEMENT_INFO:
        if (code & 0xff00) {
            sclp_c->read_storage_element1_info(sclp, sccb);
        } else {
            sclp_c->read_storage_element0_info(sclp, sccb);
        }
        break;
    case SCLP_ATTACH_STORAGE_ELEMENT:
        sclp_c->attach_storage_element(sclp, sccb, (code & 0xff00) >> 8);
        break;
    case SCLP_ASSIGN_STORAGE:
        sclp_c->assign_storage(sclp, sccb);
        break;
    case SCLP_UNASSIGN_STORAGE:
        sclp_c->unassign_storage(sclp, sccb);
        break;
    case SCLP_CMDW_CONFIGURE_PCI:
        s390_pci_sclp_configure(sccb);
        break;
    case SCLP_CMDW_DECONFIGURE_PCI:
        s390_pci_sclp_deconfigure(sccb);
        break;
    default:
        efc->command_handler(ef, sccb, code);
        break;
    }
}

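/*
 * Entry point for the SERVICE CALL instruction: validate the guest-supplied
 * SCCB address and length, run the command on a private copy of the SCCB,
 * copy the result back and post the service-signal external interrupt.
 */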
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    int r = 0;
    SCCB work_sccb;

    hwaddr sccb_len = sizeof(SCCB);

    /* first some basic checks on program checks */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        r = -PGM_PRIVILEGED;
        goto out;
    }
    if (cpu_physical_memory_is_io(sccb)) {
        r = -PGM_ADDRESSING;
        goto out;
    }
    if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
        || (sccb & ~0x7ffffff8UL) != 0) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    /*
     * we want to work on a private copy of the sccb, to prevent guests
     * from playing dirty tricks by modifying the memory content after
     * the host has checked the values
     */
    cpu_physical_memory_read(sccb, &work_sccb, sccb_len);

    /* Valid sccb sizes */
    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
        be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    sclp_c->execute(sclp, &work_sccb, code);

    cpu_physical_memory_write(sccb, &work_sccb,
                              be16_to_cpu(work_sccb.h.length));

    sclp_c->service_interrupt(sclp, sccb);

out:
    return r;
}

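/*
 * Raise the service-signal external interrupt for the given SCCB address;
 * the low bit of the interrupt parameter flags a still-pending event.
 */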
static void service_interrupt(SCLPDevice *sclp, uint32_t sccb)
{
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    uint32_t param = sccb & ~3;

    /* Indicate whether an event is still pending */
    param |= efc->event_pending(ef) ? 1 : 0;

    if (!param) {
        /* No need to send an interrupt, there's nothing to be notified about */
        return;
    }
    s390_sclp_extint(param);
}

void sclp_service_interrupt(uint32_t sccb)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);

    sclp_c->service_interrupt(sclp, sccb);
}

/* qemu object creation and initialization functions */
void s390_sclp_init(void)
{
    Object *new = object_new(TYPE_SCLP);

    object_property_add_child(qdev_get_machine(), TYPE_SCLP, new,
                              NULL);
    object_unref(OBJECT(new));
    qdev_init_nofail(DEVICE(new));
}

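/* Realize the SCLP device: realize the event facility and apply the host memory limit. */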
static void sclp_realize(DeviceState *dev, Error **errp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    SCLPDevice *sclp = SCLP(dev);
    Error *err = NULL;
    uint64_t hw_limit;
    int ret;

    object_property_set_bool(OBJECT(sclp->event_facility), true, "realized",
                             &err);
    if (err) {
        goto out;
    }
    /*
     * qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS. As long
     * as we can't find a fitting bus via the qom tree, we have to add the
     * event facility to the sysbus, so e.g. a sclp console can be created.
     */
    qdev_set_parent_bus(DEVICE(sclp->event_facility), sysbus_get_default());

    ret = s390_set_memory_limit(machine->maxram_size, &hw_limit);
    if (ret == -E2BIG) {
        error_setg(&err, "qemu: host supports a maximum of %" PRIu64 " GB",
                   hw_limit >> 30);
    } else if (ret) {
        error_setg(&err, "qemu: setting the guest size failed");
    }

out:
    error_propagate(errp, err);
}

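/*
 * Derive the storage increment size and the core/standby/padded memory
 * layout from the -m/maxmem/slots configuration. increment_size is an
 * exponent of two; with the default of 20 one increment is 1 MiB, and
 * read_SCP_info then reports rnsize = 1 << (increment_size - 20) = 1 MiB.
 */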
static void sclp_memory_init(SCLPDevice *sclp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    ram_addr_t initial_mem = machine->ram_size;
    ram_addr_t max_mem = machine->maxram_size;
    ram_addr_t standby_mem = max_mem - initial_mem;
    ram_addr_t pad_mem = 0;
    int increment_size = 20;

    /* The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer.
     * The variable 'increment_size' is an exponent of 2 that can be
     * used to calculate the size (in bytes) of an increment. */
    while ((initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
    if (machine->ram_slots) {
        while ((standby_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
            increment_size++;
        }
    }
    sclp->increment_size = increment_size;

    /* The core and standby memory areas need to be aligned with
     * the increment size. In effect, this can cause the
     * user-specified memory size to be rounded down to align
     * with the nearest increment boundary. */
    initial_mem = initial_mem >> increment_size << increment_size;
    standby_mem = standby_mem >> increment_size << increment_size;

    /* If the size of ram is not on a MEM_SECTION_SIZE boundary,
       calculate the pad size necessary to force this boundary. */
    if (machine->ram_slots && standby_mem) {
        sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev();

        if (initial_mem % MEM_SECTION_SIZE) {
            pad_mem = MEM_SECTION_SIZE - initial_mem % MEM_SECTION_SIZE;
        }
        mhd->increment_size = increment_size;
        mhd->pad_size = pad_mem;
        mhd->standby_mem_size = standby_mem;
    }
    machine->ram_size = initial_mem;
    machine->maxram_size = initial_mem + pad_mem + standby_mem;
    /* let's propagate the changed ram size into the global variable. */
    ram_size = initial_mem;
}

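/* Instance init: create the event facility child and set up the memory layout. */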
static void sclp_init(Object *obj)
{
    SCLPDevice *sclp = SCLP(obj);
    Object *new;

    new = object_new(TYPE_SCLP_EVENT_FACILITY);
    object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new, NULL);
    sclp->event_facility = EVENT_FACILITY(new);

    sclp_memory_init(sclp);
}

static void sclp_class_init(ObjectClass *oc, void *data)
{
    SCLPDeviceClass *sc = SCLP_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->desc = "SCLP (Service-Call Logical Processor)";
    dc->realize = sclp_realize;
    dc->hotpluggable = false;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    sc->read_SCP_info = read_SCP_info;
    sc->read_storage_element0_info = read_storage_element0_info;
    sc->read_storage_element1_info = read_storage_element1_info;
    sc->attach_storage_element = attach_storage_element;
    sc->assign_storage = assign_storage;
    sc->unassign_storage = unassign_storage;
    sc->read_cpu_info = sclp_read_cpu_info;
    sc->execute = sclp_execute;
    sc->service_interrupt = service_interrupt;
}

static TypeInfo sclp_info = {
    .name = TYPE_SCLP,
    .parent = TYPE_DEVICE,
    .instance_init = sclp_init,
    .instance_size = sizeof(SCLPDevice),
    .class_init = sclp_class_init,
    .class_size = sizeof(SCLPDeviceClass),
};

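/*
 * Create the memory hotplug device under the machine and return it; the
 * get_* variant below only resolves an already-created instance.
 */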
sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
{
    DeviceState *dev;
    dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
    object_property_add_child(qdev_get_machine(),
                              TYPE_SCLP_MEMORY_HOTPLUG_DEV,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
{
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass,
                                               void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static TypeInfo sclp_memory_hotplug_dev_info = {
    .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(sclpMemoryHotplugDev),
    .class_init = sclp_memory_hotplug_dev_class_init,
};

static void register_types(void)
{
    type_register_static(&sclp_memory_hotplug_dev_info);
    type_register_static(&sclp_info);
}
type_init(register_types)