/*
 * SCLP Support
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Christian Borntraeger <borntraeger@de.ibm.com>
 *  Heinz Graalfs <graalfs@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "sysemu/kvm.h"
#include "exec/memory.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"
#include "hw/s390x/s390-pci-bus.h"

static inline SCLPDevice *get_sclp_device(void)
{
    return SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
}

/* Provide information about the configuration, CPUs and storage */
static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadInfo *read_info = (ReadInfo *) sccb;
    MachineState *machine = MACHINE(qdev_get_machine());
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    CPUState *cpu;
    int cpu_count = 0;
    int rnsize, rnmax;
    int i = 0;
    int slots = MIN(machine->ram_slots, s390_get_memslot_count(kvm_state));

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    read_info->entries_cpu = cpu_to_be16(cpu_count);
    read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
    read_info->highest_cpu = cpu_to_be16(max_cpus);

    for (i = 0; i < cpu_count; i++) {
        read_info->entries[i].address = i;
        read_info->entries[i].type = 0;
    }

    read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
                                        SCLP_HAS_PCI_RECONFIG);

    /* Memory Hotplug is only supported for the ccw machine type */
    if (mhd) {
        mhd->standby_subregion_size = MEM_SECTION_SIZE;
        /* Deduct the memory slot already used for core */
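        /*
         * Each standby subregion is a power-of-two multiple of
         * MEM_SECTION_SIZE; it is doubled until the remaining (slots - 1)
         * memory slots can cover all of standby memory with one subregion
         * per slot.
         */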
        if (slots > 0) {
            while ((mhd->standby_subregion_size * (slots - 1)
                    < mhd->standby_mem_size)) {
                mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
            }
        }
        /*
         * Initialize mapping of guest standby memory sections indicating which
         * are and are not online. Assume all standby memory begins offline.
         */
        if (mhd->standby_state_map == 0) {
            if (mhd->standby_mem_size % mhd->standby_subregion_size) {
                mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
                                             mhd->standby_subregion_size + 1) *
                                             (mhd->standby_subregion_size /
                                             MEM_SECTION_SIZE));
            } else {
                mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
                                                   MEM_SECTION_SIZE);
            }
        }
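        /*
         * padded_ram_size is the guest address at which standby memory
         * starts: core RAM plus the pad added by sclp_memory_init() to
         * reach a MEM_SECTION_SIZE boundary.  rzm is the storage increment
         * size in bytes.
         */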
        mhd->padded_ram_size = ram_size + mhd->pad_size;
        mhd->rzm = 1 << mhd->increment_size;

        read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
    }

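    /*
     * Report the storage increment size (in MB) and the highest storage
     * increment number.  Values too large for the short ReadInfo fields
     * are flagged there as 0 and reported via rnsize2/rnmax2 instead.
     */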
    rnsize = 1 << (sclp->increment_size - 20);
    if (rnsize <= 128) {
        read_info->rnsize = rnsize;
    } else {
        read_info->rnsize = 0;
        read_info->rnsize2 = cpu_to_be32(rnsize);
    }

    rnmax = machine->maxram_size >> sclp->increment_size;
    if (rnmax < 0x10000) {
        read_info->rnmax = cpu_to_be16(rnmax);
    } else {
        read_info->rnmax = cpu_to_be16(0);
        read_info->rnmax2 = cpu_to_be64(rnmax);
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

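/*
 * Storage element 0 describes the core memory that is always assigned;
 * storage element 1 describes the standby memory that the guest can bring
 * online later with SCLP_ASSIGN_STORAGE.  Each storage increment is
 * identified by a 32-bit subincrement id in the returned entry list.
 */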
static void read_storage_element0_info(SCLPDevice *sclp, SCCB *sccb)
{
    int i, assigned;
    int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((ram_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding core memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    assigned = ram_size >> mhd->increment_size;
    storage_info->assigned = cpu_to_be16(assigned);

    for (i = 0; i < assigned; i++) {
        storage_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

static void read_storage_element1_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding standby memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
                                         mhd->increment_size);
    storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
                                        mhd->increment_size);
    sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
}

static void attach_storage_element(SCLPDevice *sclp, SCCB *sccb,
                                   uint16_t element)
{
    int i, assigned, subincrement_id;
    AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if (element != 1) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    assigned = mhd->standby_mem_size >> mhd->increment_size;
    attach_info->assigned = cpu_to_be16(assigned);
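    /*
     * Standby increment ids continue the numbering after the core
     * increments: the count of core increments goes into the high halfword
     * of the first id, and each entry advances by SCLP_INCREMENT_UNIT.
     */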
    subincrement_id = ((ram_size >> mhd->increment_size) << 16)
                      + SCLP_STARTING_SUBINCREMENT_ID;
    for (i = 0; i < assigned; i++) {
        attach_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

static void assign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    uint64_t this_subregion_size;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t assign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

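    /*
     * The region number (rn) in the SCCB is 1-based; increment n starts at
     * guest address (n - 1) * rzm.
     */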
    assign_addr = (assign_info->rn - 1) * mhd->rzm;

    if ((assign_addr % MEM_SECTION_SIZE == 0) &&
        (assign_addr >= mhd->padded_ram_size)) {
        /* Re-use existing memory region if found */
        mr = memory_region_find(sysmem, assign_addr, 1).mr;
        memory_region_unref(mr);
        if (!mr) {
            MemoryRegion *standby_ram = g_new(MemoryRegion, 1);

            /* offset to align to standby_subregion_size for allocation */
            ram_addr_t offset = assign_addr -
                                (assign_addr - mhd->padded_ram_size)
                                % mhd->standby_subregion_size;

            /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */
            char id[16];
            snprintf(id, 16, "standby.ram%d",
                     (int)((offset - mhd->padded_ram_size) /
                     mhd->standby_subregion_size) + 1);

            /* Allocate a subregion of the calculated standby_subregion_size */
            if (offset + mhd->standby_subregion_size >
                mhd->padded_ram_size + mhd->standby_mem_size) {
                this_subregion_size = mhd->padded_ram_size +
                                      mhd->standby_mem_size - offset;
            } else {
                this_subregion_size = mhd->standby_subregion_size;
            }

            memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
                                   &error_abort);
            /* This is a hack to make memory hotunplug work again. Once we have
             * subdevices, we have to unparent them when unassigning memory,
             * instead of doing it via the ref count of the MemoryRegion. */
            object_ref(OBJECT(standby_ram));
            object_unparent(OBJECT(standby_ram));
            vmstate_register_ram_global(standby_ram);
            memory_region_add_subregion(sysmem, offset, standby_ram);
        }
        /* The specified subregion is no longer in standby */
        mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
                               / MEM_SECTION_SIZE] = 1;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

static void unassign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t unassign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    unassign_addr = (assign_info->rn - 1) * mhd->rzm;

    /* if the addr is a multiple of 256 MB */
    if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
        (unassign_addr >= mhd->padded_ram_size)) {
        mhd->standby_state_map[(unassign_addr -
                           mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;

        /* find the specified memory region and destroy it */
        mr = memory_region_find(sysmem, unassign_addr, 1).mr;
        memory_region_unref(mr);
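        /*
         * The backing subregion spans several MEM_SECTION_SIZE sections;
         * it may only be deleted once every section it contains is back in
         * standby, so consult the state map before removing it.
         */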
        if (mr) {
            int i;
            int is_removable = 1;
            ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
                                     (unassign_addr - mhd->padded_ram_size)
                                     % mhd->standby_subregion_size);
            /* Mark all affected subregions as 'standby' once again */
            for (i = 0;
                 i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
                 i++) {
                if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
                    is_removable = 0;
                    break;
                }
            }
            if (is_removable) {
                memory_region_del_subregion(sysmem, mr);
                object_unref(OBJECT(mr));
            }
        }
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

/* Provide information about the CPU */
static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
    CPUState *cpu;
    int cpu_count = 0;
    int i = 0;

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    cpu_info->nr_configured = cpu_to_be16(cpu_count);
    cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
    cpu_info->nr_standby = cpu_to_be16(0);

    /* The standby offset is 16-byte for each CPU */
    cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
        + cpu_info->nr_configured*sizeof(CPUEntry));

    for (i = 0; i < cpu_count; i++) {
        cpu_info->entries[i].address = i;
        cpu_info->entries[i].type = 0;
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code)
{
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

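    /*
     * Only the bits selected by SCLP_CMD_CODE_MASK identify the command;
     * for the storage element commands the byte in bits 8-15 selects the
     * storage element.  Anything not handled here is forwarded to the
     * event facility.
     */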
    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        sclp_c->read_SCP_info(sclp, sccb);
        break;
    case SCLP_CMDW_READ_CPU_INFO:
        sclp_c->read_cpu_info(sclp, sccb);
        break;
    case SCLP_READ_STORAGE_ELEMENT_INFO:
        if (code & 0xff00) {
            sclp_c->read_storage_element1_info(sclp, sccb);
        } else {
            sclp_c->read_storage_element0_info(sclp, sccb);
        }
        break;
    case SCLP_ATTACH_STORAGE_ELEMENT:
        sclp_c->attach_storage_element(sclp, sccb, (code & 0xff00) >> 8);
        break;
    case SCLP_ASSIGN_STORAGE:
        sclp_c->assign_storage(sclp, sccb);
        break;
    case SCLP_UNASSIGN_STORAGE:
        sclp_c->unassign_storage(sclp, sccb);
        break;
    case SCLP_CMDW_CONFIGURE_PCI:
        s390_pci_sclp_configure(1, sccb);
        break;
    case SCLP_CMDW_DECONFIGURE_PCI:
        s390_pci_sclp_configure(0, sccb);
        break;
    default:
        efc->command_handler(ef, sccb, code);
        break;
    }
}

int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    int r = 0;
    SCCB work_sccb;

    hwaddr sccb_len = sizeof(SCCB);

    /* first some basic checks on program checks */
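    /*
     * The caller must not be in problem state, the SCCB must not be in the
     * I/O address space, and it must be 8-byte aligned, below 2 GB and
     * outside the lowcore and prefix pages; violations are reported back
     * as program interruption codes.
     */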
    if (env->psw.mask & PSW_MASK_PSTATE) {
        r = -PGM_PRIVILEGED;
        goto out;
    }
    if (cpu_physical_memory_is_io(sccb)) {
        r = -PGM_ADDRESSING;
        goto out;
    }
    if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
        || (sccb & ~0x7ffffff8UL) != 0) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    /*
     * we want to work on a private copy of the sccb, to prevent guests
     * from playing dirty tricks by modifying the memory content after
     * the host has checked the values
     */
    cpu_physical_memory_read(sccb, &work_sccb, sccb_len);

    /* Valid sccb sizes */
    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
        be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    sclp_c->execute(sclp, (SCCB *)&work_sccb, code);

    cpu_physical_memory_write(sccb, &work_sccb,
                              be16_to_cpu(work_sccb.h.length));

    sclp_c->service_interrupt(sclp, sccb);

out:
    return r;
}

static void service_interrupt(SCLPDevice *sclp, uint32_t sccb)
{
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    uint32_t param = sccb & ~3;
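    /*
     * The external interrupt parameter is the SCCB address with its two
     * low bits cleared; the low bit is then reused to flag a pending event.
     */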
    /* Indicate whether an event is still pending */
    param |= efc->event_pending(ef) ? 1 : 0;

    if (!param) {
        /* No need to send an interrupt, there's nothing to be notified about */
        return;
    }
    s390_sclp_extint(param);
}

void sclp_service_interrupt(uint32_t sccb)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);

    sclp_c->service_interrupt(sclp, sccb);
}

/* qemu object creation and initialization functions */

void s390_sclp_init(void)
{
    Object *new = object_new(TYPE_SCLP);

    object_property_add_child(qdev_get_machine(), TYPE_SCLP, new,
                              NULL);
    object_unref(OBJECT(new));
    qdev_init_nofail(DEVICE(new));
}

static void sclp_realize(DeviceState *dev, Error **errp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    SCLPDevice *sclp = SCLP(dev);
    Error *l_err = NULL;
    uint64_t hw_limit;
    int ret;

    object_property_set_bool(OBJECT(sclp->event_facility), true, "realized",
                             &l_err);
    if (l_err) {
        goto error;
    }

    ret = s390_set_memory_limit(machine->maxram_size, &hw_limit);
    if (ret == -E2BIG) {
        error_setg(&l_err, "qemu: host supports a maximum of %" PRIu64 " GB",
                   hw_limit >> 30);
        goto error;
    } else if (ret) {
        error_setg(&l_err, "qemu: setting the guest size failed");
        goto error;
    }
    return;
error:
    error_propagate(errp, l_err);
}

static void sclp_memory_init(SCLPDevice *sclp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    ram_addr_t initial_mem = machine->ram_size;
    ram_addr_t max_mem = machine->maxram_size;
    ram_addr_t standby_mem = max_mem - initial_mem;
    ram_addr_t pad_mem = 0;
    int increment_size = 20;

    /* The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer.
     * The variable 'increment_size' is an exponent of 2 that can be
     * used to calculate the size (in bytes) of an increment. */
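    /*
     * Worked example (assuming MAX_STORAGE_INCREMENTS is 1020): a 4 GiB
     * guest starts at increment_size 20 (1 MiB increments, 4096 of them);
     * the loop below raises it to 23, i.e. 8 MiB increments, 512 in total.
     */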
    while ((initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
    if (machine->ram_slots) {
        while ((standby_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
            increment_size++;
        }
    }
    sclp->increment_size = increment_size;

    /* The core and standby memory areas need to be aligned with
     * the increment size.  In effect, this can cause the
     * user-specified memory size to be rounded down to align
     * with the nearest increment boundary. */
    initial_mem = initial_mem >> increment_size << increment_size;
    standby_mem = standby_mem >> increment_size << increment_size;

    /* If the size of ram is not on a MEM_SECTION_SIZE boundary,
       calculate the pad size necessary to force this boundary. */
    if (machine->ram_slots && standby_mem) {
        sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev();

        if (initial_mem % MEM_SECTION_SIZE) {
            pad_mem = MEM_SECTION_SIZE - initial_mem % MEM_SECTION_SIZE;
        }
        mhd->increment_size = increment_size;
        mhd->pad_size = pad_mem;
        mhd->standby_mem_size = standby_mem;
    }
    machine->ram_size = initial_mem;
    machine->maxram_size = initial_mem + pad_mem + standby_mem;
    /* let's propagate the changed ram size into the global variable. */
    ram_size = initial_mem;
}

static void sclp_init(Object *obj)
{
    SCLPDevice *sclp = SCLP(obj);
    Object *new;

    new = object_new(TYPE_SCLP_EVENT_FACILITY);
    object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new, NULL);
    /* qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS */
    qdev_set_parent_bus(DEVICE(new), sysbus_get_default());
    sclp->event_facility = EVENT_FACILITY(new);

    sclp_memory_init(sclp);
}

static void sclp_class_init(ObjectClass *oc, void *data)
{
    SCLPDeviceClass *sc = SCLP_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->desc = "SCLP (Service-Call Logical Processor)";
    dc->realize = sclp_realize;
    dc->hotpluggable = false;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    sc->read_SCP_info = read_SCP_info;
    sc->read_storage_element0_info = read_storage_element0_info;
    sc->read_storage_element1_info = read_storage_element1_info;
    sc->attach_storage_element = attach_storage_element;
    sc->assign_storage = assign_storage;
    sc->unassign_storage = unassign_storage;
    sc->read_cpu_info = sclp_read_cpu_info;
    sc->execute = sclp_execute;
    sc->service_interrupt = service_interrupt;
}

static TypeInfo sclp_info = {
    .name = TYPE_SCLP,
    .parent = TYPE_DEVICE,
    .instance_init = sclp_init,
    .instance_size = sizeof(SCLPDevice),
    .class_init = sclp_class_init,
    .class_size = sizeof(SCLPDeviceClass),
};

sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
{
    DeviceState *dev;

    dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
    object_property_add_child(qdev_get_machine(),
                              TYPE_SCLP_MEMORY_HOTPLUG_DEV,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
{
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass,
                                               void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static TypeInfo sclp_memory_hotplug_dev_info = {
    .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(sclpMemoryHotplugDev),
    .class_init = sclp_memory_hotplug_dev_class_init,
};

static void register_types(void)
{
    type_register_static(&sclp_memory_hotplug_dev_info);
    type_register_static(&sclp_info);
}
type_init(register_types);