/*
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Christian Borntraeger <borntraeger@de.ibm.com>
 *  Heinz Graalfs <graalfs@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/memory.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"

static inline SCLPDevice *get_sclp_device(void)
{
    static SCLPDevice *sclp;

    if (!sclp) {
        sclp = SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
    }
    return sclp;
}

static void prepare_cpu_entries(SCLPDevice *sclp, CPUEntry *entry, int count)
{
    uint8_t features[SCCB_CPU_FEATURE_LEN] = { 0 };
    int i;

    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CPU, features);
    for (i = 0; i < count; i++) {
        entry[i].address = i;
        entry[i].type = 0;
        memcpy(entry[i].features, features, sizeof(entry[i].features));
    }
}

/* Provide information about the configuration, CPUs and storage */
static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadInfo *read_info = (ReadInfo *) sccb;
    MachineState *machine = MACHINE(qdev_get_machine());
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    CPUState *cpu;
    int cpu_count = 0;
    int rnsize, rnmax;
    int slots = MIN(machine->ram_slots, s390_get_memslot_count());
    IplParameterBlock *ipib = s390_ipl_get_iplb();

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    read_info->entries_cpu = cpu_to_be16(cpu_count);
    read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
    read_info->highest_cpu = cpu_to_be16(max_cpus);

    read_info->ibc_val = cpu_to_be32(s390_get_ibc_val());

    /* Configuration Characteristic (Extension) */
    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR,
                        read_info->conf_char);
    s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT,
                        read_info->conf_char_ext);

    prepare_cpu_entries(sclp, read_info->entries, cpu_count);

    read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
                                        SCLP_HAS_IOA_RECONFIG);

    /* Memory Hotplug is only supported for the ccw machine type */
    if (mhd) {
        mhd->standby_subregion_size = MEM_SECTION_SIZE;
        /* Deduct the memory slot already used for core */
        if (slots > 0) {
            while ((mhd->standby_subregion_size * (slots - 1)
                    < mhd->standby_mem_size)) {
                mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
            }
        }
        /*
         * Initialize mapping of guest standby memory sections indicating which
         * are and are not online. Assume all standby memory begins offline.
         */
        if (mhd->standby_state_map == 0) {
            if (mhd->standby_mem_size % mhd->standby_subregion_size) {
                mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
                                             mhd->standby_subregion_size + 1) *
                                             (mhd->standby_subregion_size /
                                             MEM_SECTION_SIZE));
            } else {
                mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
                                                   MEM_SECTION_SIZE);
            }
        }
        mhd->padded_ram_size = ram_size + mhd->pad_size;
        mhd->rzm = 1 << mhd->increment_size;

        read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
    }
    read_info->mha_pow = s390_get_mha_pow();
    read_info->hmfai = cpu_to_be32(s390_get_hmfai());

    rnsize = 1 << (sclp->increment_size - 20);
    if (rnsize <= 128) {
        read_info->rnsize = rnsize;
    } else {
        read_info->rnsize = 0;
        read_info->rnsize2 = cpu_to_be32(rnsize);
    }

    rnmax = machine->maxram_size >> sclp->increment_size;
    if (rnmax < 0x10000) {
        read_info->rnmax = cpu_to_be16(rnmax);
    } else {
        read_info->rnmax = cpu_to_be16(0);
        read_info->rnmax2 = cpu_to_be64(rnmax);
    }

    if (ipib && ipib->flags & DIAG308_FLAGS_LP_VALID) {
        memcpy(&read_info->loadparm, &ipib->loadparm,
               sizeof(read_info->loadparm));
    } else {
        s390_ipl_set_loadparm(read_info->loadparm);
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

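/*
 * Worked example for the standby subregion sizing above (illustrative only,
 * not part of the device model; it assumes MEM_SECTION_SIZE is the 256 MiB
 * section granularity used throughout this file). With slots = 3 and
 * mhd->standby_mem_size = 8 GiB, standby_subregion_size starts at 256 MiB
 * and doubles while size * (slots - 1) < 8 GiB:
 *   256 MiB -> 512 MiB -> 1 GiB -> 2 GiB -> 4 GiB  (4 GiB * 2 >= 8 GiB, stop)
 * standby_state_map then gets one byte per 256 MiB section, i.e.
 * 8 GiB / 256 MiB = 32 entries, all initially 0 (offline).
 */
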
static void read_storage_element0_info(SCLPDevice *sclp, SCCB *sccb)
{
    int i, assigned;
    int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((ram_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding core memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    assigned = ram_size >> mhd->increment_size;
    storage_info->assigned = cpu_to_be16(assigned);

    for (i = 0; i < assigned; i++) {
        storage_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

static void read_storage_element1_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding standby memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
                                         mhd->increment_size);
    storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
                                        mhd->increment_size);
    sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
}

static void attach_storage_element(SCLPDevice *sclp, SCCB *sccb,
                                   uint16_t element)
{
    int i, assigned, subincrement_id;
    AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    if (element != 1) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    assigned = mhd->standby_mem_size >> mhd->increment_size;
    attach_info->assigned = cpu_to_be16(assigned);
    subincrement_id = ((ram_size >> mhd->increment_size) << 16)
                      + SCLP_STARTING_SUBINCREMENT_ID;
    for (i = 0; i < assigned; i++) {
        attach_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

static void assign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    uint64_t this_subregion_size;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t assign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    assign_addr = (assign_info->rn - 1) * mhd->rzm;

    if ((assign_addr % MEM_SECTION_SIZE == 0) &&
        (assign_addr >= mhd->padded_ram_size)) {
        /* Re-use existing memory region if found */
        mr = memory_region_find(sysmem, assign_addr, 1).mr;
        memory_region_unref(mr);
        if (!mr) {
            MemoryRegion *standby_ram = g_new(MemoryRegion, 1);

            /* offset to align to standby_subregion_size for allocation */
            ram_addr_t offset = assign_addr -
                                (assign_addr - mhd->padded_ram_size)
                                % mhd->standby_subregion_size;

            /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */
            char id[16];
            snprintf(id, 16, "standby.ram%d",
                     (int)((offset - mhd->padded_ram_size) /
                           mhd->standby_subregion_size) + 1);

            /* Allocate a subregion of the calculated standby_subregion_size */
            if (offset + mhd->standby_subregion_size >
                mhd->padded_ram_size + mhd->standby_mem_size) {
                this_subregion_size = mhd->padded_ram_size +
                                      mhd->standby_mem_size - offset;
            } else {
                this_subregion_size = mhd->standby_subregion_size;
            }

            memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
                                   &error_fatal);
            /* This is a hack to make memory hotunplug work again. Once we have
             * subdevices, we have to unparent them when unassigning memory,
             * instead of doing it via the ref count of the MemoryRegion. */
            object_ref(OBJECT(standby_ram));
            object_unparent(OBJECT(standby_ram));
            memory_region_add_subregion(sysmem, offset, standby_ram);
        }
        /* The specified subregion is no longer in standby */
        mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
                               / MEM_SECTION_SIZE] = 1;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

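/*
 * Worked example for the address math above (illustrative only; the numbers
 * are hypothetical). Assume mhd->rzm = 1 MiB, mhd->padded_ram_size = 4 GiB
 * and mhd->standby_subregion_size = 1 GiB. A guest request with rn = 5633
 * gives assign_addr = 5632 MiB, which is a multiple of the 256 MiB section
 * size and lies above the padded core memory. The allocation offset is
 * aligned down to the owning subregion:
 *   offset = 5632 MiB - (1536 MiB % 1024 MiB) = 5120 MiB
 * so the backing region is named "standby.ram2"
 * ((5120 MiB - 4096 MiB) / 1024 MiB + 1 = 2), and section
 * (5632 MiB - 4096 MiB) / 256 MiB = 6 of standby_state_map is marked online.
 */
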
static void unassign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t unassign_addr;
    MemoryRegion *sysmem = get_system_memory();

    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    unassign_addr = (assign_info->rn - 1) * mhd->rzm;

    /* if the addr is a multiple of 256 MB */
    if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
        (unassign_addr >= mhd->padded_ram_size)) {
        mhd->standby_state_map[(unassign_addr -
                                mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;

        /* find the specified memory region and destroy it */
        mr = memory_region_find(sysmem, unassign_addr, 1).mr;
        memory_region_unref(mr);
        if (mr) {
            int i;
            int is_removable = 1;
            ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
                                     (unassign_addr - mhd->padded_ram_size)
                                     % mhd->standby_subregion_size);
            /* Mark all affected subregions as 'standby' once again */
            for (i = 0;
                 i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
                 i++) {
                if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
                    is_removable = 0;
                    break;
                }
            }
            if (is_removable) {
                memory_region_del_subregion(sysmem, mr);
                object_unref(OBJECT(mr));
            }
        }
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

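/*
 * Continuing the hypothetical example from assign_storage(): with 1 GiB
 * subregions and 256 MiB sections, the region "standby.ram2" covers
 * standby_state_map entries 4..7. Unassigning rn = 5633 clears entry 6 only;
 * the loop above then finds map_offset = 1024 MiB and scans entries 4..7,
 * so the backing MemoryRegion is deleted only once all four entries are 0,
 * i.e. once every section of that subregion has been unassigned again.
 */
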
/* Provide information about the CPU */
static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
    CPUState *cpu;
    int cpu_count = 0;

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    cpu_info->nr_configured = cpu_to_be16(cpu_count);
    cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
    cpu_info->nr_standby = cpu_to_be16(0);

    /* The standby offset is 16-byte for each CPU */
    cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
        + cpu_info->nr_configured * sizeof(CPUEntry));

    prepare_cpu_entries(sclp, cpu_info->entries, cpu_count);

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

static void sclp_configure_io_adapter(SCLPDevice *sclp, SCCB *sccb,
                                      bool configure)
{
    int rc;

    if (be16_to_cpu(sccb->h.length) < 16) {
        rc = SCLP_RC_INSUFFICIENT_SCCB_LENGTH;
        goto out_err;
    }

    switch (((IoaCfgSccb *)sccb)->atype) {
    case SCLP_RECONFIG_PCI_ATYPE:
        if (s390_has_feat(S390_FEAT_ZPCI)) {
            if (configure) {
                s390_pci_sclp_configure(sccb);
            } else {
                s390_pci_sclp_deconfigure(sccb);
            }
            return;
        }
        /* fallthrough */
    default:
        rc = SCLP_RC_ADAPTER_TYPE_NOT_RECOGNIZED;
    }

out_err:
    sccb->h.response_code = cpu_to_be16(rc);
}

static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code)
{
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        sclp_c->read_SCP_info(sclp, sccb);
        break;
    case SCLP_CMDW_READ_CPU_INFO:
        sclp_c->read_cpu_info(sclp, sccb);
        break;
    case SCLP_READ_STORAGE_ELEMENT_INFO:
        if (code & 0xff00) {
            sclp_c->read_storage_element1_info(sclp, sccb);
        } else {
            sclp_c->read_storage_element0_info(sclp, sccb);
        }
        break;
    case SCLP_ATTACH_STORAGE_ELEMENT:
        sclp_c->attach_storage_element(sclp, sccb, (code & 0xff00) >> 8);
        break;
    case SCLP_ASSIGN_STORAGE:
        sclp_c->assign_storage(sclp, sccb);
        break;
    case SCLP_UNASSIGN_STORAGE:
        sclp_c->unassign_storage(sclp, sccb);
        break;
    case SCLP_CMDW_CONFIGURE_IOA:
        sclp_configure_io_adapter(sclp, sccb, true);
        break;
    case SCLP_CMDW_DECONFIGURE_IOA:
        sclp_configure_io_adapter(sclp, sccb, false);
        break;
    default:
        efc->command_handler(ef, sccb, code);
        break;
    }
}

int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    int r = 0;
    SCCB work_sccb;

    hwaddr sccb_len = sizeof(SCCB);

    /* first some basic checks on program checks */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        r = -PGM_PRIVILEGED;
        goto out;
    }
    if (cpu_physical_memory_is_io(sccb)) {
        r = -PGM_ADDRESSING;
        goto out;
    }
    if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
        || (sccb & ~0x7ffffff8UL) != 0) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    /*
     * we want to work on a private copy of the sccb, to prevent guests
     * from playing dirty tricks by modifying the memory content after
     * the host has checked the values
     */
    cpu_physical_memory_read(sccb, &work_sccb, sccb_len);

    /* Valid sccb sizes */
    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
        be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    sclp_c->execute(sclp, &work_sccb, code);

    cpu_physical_memory_write(sccb, &work_sccb,
                              be16_to_cpu(work_sccb.h.length));

    sclp_c->service_interrupt(slp, sccb);

out:
    return r;
}

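/*
 * Illustration of the SCCB address checks above (hypothetical values): the
 * guest-supplied address must be doubleword aligned, below 2 GiB, and must
 * not point into the first 8 KiB of memory or into the prefix area.
 *   sccb = 0x12340     -> accepted (assuming env->psa != 0x12000)
 *   sccb = 0x12344     -> specification exception (not 8-byte aligned)
 *   sccb = 0x1000      -> specification exception (inside the first 8 KiB)
 *   sccb = 0x80000000  -> specification exception (not below 2 GiB)
 */
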
static void service_interrupt(SCLPDevice *sclp, uint32_t sccb)
{
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    uint32_t param = sccb & ~3;

    /* Indicate whether an event is still pending */
    param |= efc->event_pending(ef) ? 1 : 0;

    if (!param) {
        /* No need to send an interrupt, there's nothing to be notified about */
        return;
    }
    s390_sclp_extint(param);
}

void sclp_service_interrupt(uint32_t sccb)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);

    sclp_c->service_interrupt(sclp, sccb);
}

/* qemu object creation and initialization functions */

void s390_sclp_init(void)
{
    Object *new = object_new(TYPE_SCLP);

    object_property_add_child(qdev_get_machine(), TYPE_SCLP, new,
                              NULL);
    object_unref(OBJECT(new));
    qdev_init_nofail(DEVICE(new));
}

static void sclp_realize(DeviceState *dev, Error **errp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    SCLPDevice *sclp = SCLP(dev);
    Error *err = NULL;
    uint64_t hw_limit;
    int ret;

    object_property_set_bool(OBJECT(sclp->event_facility), true, "realized",
                             &err);
    if (err) {
        goto out;
    }
    /*
     * qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS. As long
     * as we can't find a fitting bus via the qom tree, we have to add the
     * event facility to the sysbus, so e.g. a sclp console can be created.
     */
    qdev_set_parent_bus(DEVICE(sclp->event_facility), sysbus_get_default());

    ret = s390_set_memory_limit(machine->maxram_size, &hw_limit);
    if (ret == -E2BIG) {
        error_setg(&err, "host supports a maximum of %" PRIu64 " GB",
                   hw_limit >> 30);
    } else if (ret) {
        error_setg(&err, "setting the guest size failed");
    }

out:
    error_propagate(errp, err);
}

static void sclp_memory_init(SCLPDevice *sclp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    ram_addr_t initial_mem = machine->ram_size;
    ram_addr_t max_mem = machine->maxram_size;
    ram_addr_t standby_mem = max_mem - initial_mem;
    ram_addr_t pad_mem = 0;
    int increment_size = 20;

    /* The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer.
     * The variable 'increment_size' is an exponent of 2 that can be
     * used to calculate the size (in bytes) of an increment. */
    while ((initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
    if (machine->ram_slots) {
        while ((standby_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
            increment_size++;
        }
    }
    sclp->increment_size = increment_size;

    /* The core and standby memory areas need to be aligned with
     * the increment size. In effect, this can cause the
     * user-specified memory size to be rounded down to align
     * with the nearest increment boundary. */
    initial_mem = initial_mem >> increment_size << increment_size;
    standby_mem = standby_mem >> increment_size << increment_size;

    /* If the size of ram is not on a MEM_SECTION_SIZE boundary,
       calculate the pad size necessary to force this boundary. */
    if (machine->ram_slots && standby_mem) {
        sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev();

        if (initial_mem % MEM_SECTION_SIZE) {
            pad_mem = MEM_SECTION_SIZE - initial_mem % MEM_SECTION_SIZE;
        }
        mhd->increment_size = increment_size;
        mhd->pad_size = pad_mem;
        mhd->standby_mem_size = standby_mem;
    }
    machine->ram_size = initial_mem;
    machine->maxram_size = initial_mem + pad_mem + standby_mem;
    /* let's propagate the changed ram size into the global variable. */
    ram_size = initial_mem;
}

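/*
 * Worked example for the increment size calculation above (illustrative
 * only; it assumes MAX_STORAGE_INCREMENTS is 1020, which is an assumption
 * about hw/s390x/sclp.h rather than something defined in this file). With
 * initial_mem = 8 GiB the loop raises increment_size from 20 (1 MiB) until
 * the increment count fits:
 *   8 GiB >> 20 = 8192, >> 21 = 4096, >> 22 = 2048, >> 23 = 1024, >> 24 = 512
 * so increment_size becomes 24, i.e. 16 MiB increments, and a requested
 * memory size that is not a multiple of 16 MiB is rounded down accordingly.
 */
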
static void sclp_init(Object *obj)
{
    SCLPDevice *sclp = SCLP(obj);
    Object *new;

    new = object_new(TYPE_SCLP_EVENT_FACILITY);
    object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new, NULL);
    object_unref(new);
    sclp->event_facility = EVENT_FACILITY(new);

    sclp_memory_init(sclp);
}

static void sclp_class_init(ObjectClass *oc, void *data)
{
    SCLPDeviceClass *sc = SCLP_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->desc = "SCLP (Service-Call Logical Processor)";
    dc->realize = sclp_realize;
    dc->hotpluggable = false;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    sc->read_SCP_info = read_SCP_info;
    sc->read_storage_element0_info = read_storage_element0_info;
    sc->read_storage_element1_info = read_storage_element1_info;
    sc->attach_storage_element = attach_storage_element;
    sc->assign_storage = assign_storage;
    sc->unassign_storage = unassign_storage;
    sc->read_cpu_info = sclp_read_cpu_info;
    sc->execute = sclp_execute;
    sc->service_interrupt = service_interrupt;
}

static TypeInfo sclp_info = {
    .name = TYPE_SCLP,
    .parent = TYPE_DEVICE,
    .instance_init = sclp_init,
    .instance_size = sizeof(SCLPDevice),
    .class_init = sclp_class_init,
    .class_size = sizeof(SCLPDeviceClass),
};

sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
{
    DeviceState *dev;

    dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
    object_property_add_child(qdev_get_machine(),
                              TYPE_SCLP_MEMORY_HOTPLUG_DEV,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
{
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass,
                                               void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static TypeInfo sclp_memory_hotplug_dev_info = {
    .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(sclpMemoryHotplugDev),
    .class_init = sclp_memory_hotplug_dev_class_init,
};

static void register_types(void)
{
    type_register_static(&sclp_memory_hotplug_dev_info);
    type_register_static(&sclp_info);
}

type_init(register_types)