hw/mem/memory-device.c

/*
 * Memory Device Interface
 *
 * Copyright ProfitBricks GmbH 2012
 * Copyright (C) 2014 Red Hat Inc
 * Copyright (c) 2018 Red Hat Inc
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/mem/memory-device.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "qemu/range.h"
#include "hw/virtio/vhost.h"
#include "sysemu/kvm.h"
#include "trace.h"
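
/* Sort helper: order memory devices by the guest address they are mapped at. */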
static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
{
    const MemoryDeviceState *md_a = MEMORY_DEVICE(a);
    const MemoryDeviceState *md_b = MEMORY_DEVICE(b);
    const MemoryDeviceClass *mdc_a = MEMORY_DEVICE_GET_CLASS(a);
    const MemoryDeviceClass *mdc_b = MEMORY_DEVICE_GET_CLASS(b);
    const uint64_t addr_a = mdc_a->get_addr(md_a);
    const uint64_t addr_b = mdc_b->get_addr(md_b);

    if (addr_a > addr_b) {
        return 1;
    } else if (addr_a < addr_b) {
        return -1;
    }
    return 0;
}
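
/*
 * object_child_foreach() callback: collect every realized memory device below
 * @obj into the GSList pointed to by @opaque, sorted by mapped address.
 */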
static int memory_device_build_list(Object *obj, void *opaque)
{
    GSList **list = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        DeviceState *dev = DEVICE(obj);
        if (dev->realized) { /* only realized memory devices matter */
            *list = g_slist_insert_sorted(*list, dev, memory_device_addr_sort);
        }
    }

    object_child_foreach(obj, memory_device_build_list, opaque);
    return 0;
}
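
/*
 * object_child_foreach() callback: add up the memory region sizes of all
 * realized memory devices below @obj into the uint64_t pointed to by @opaque.
 */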
static int memory_device_used_region_size(Object *obj, void *opaque)
{
    uint64_t *size = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        const DeviceState *dev = DEVICE(obj);
        const MemoryDeviceState *md = MEMORY_DEVICE(obj);

        if (dev->realized) {
            *size += memory_device_get_region_size(md, &error_abort);
        }
    }

    object_child_foreach(obj, memory_device_used_region_size, opaque);
    return 0;
}
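
/*
 * Check whether a memory device of @size bytes can still be plugged: KVM and
 * vhost need a free memory slot, and the region sizes of all memory devices
 * combined must fit into the device memory area (maxram_size - ram_size).
 */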
static void memory_device_check_addable(MachineState *ms, uint64_t size,
                                        Error **errp)
{
    uint64_t used_region_size = 0;

    /* we will need a new memory slot for kvm and vhost */
    if (kvm_enabled() && !kvm_has_free_slot(ms)) {
        error_setg(errp, "hypervisor has no free memory slots left");
        return;
    }
    if (!vhost_has_free_slot()) {
        error_setg(errp, "a used vhost backend has no free memory slots left");
        return;
    }

    /* will we exceed the total amount of memory specified */
    memory_device_used_region_size(OBJECT(ms), &used_region_size);
    if (used_region_size + size < used_region_size ||
        used_region_size + size > ms->maxram_size - ms->ram_size) {
        error_setg(errp, "not enough space, currently 0x%" PRIx64
                   " in use of total space for memory devices 0x" RAM_ADDR_FMT,
                   used_region_size, ms->maxram_size - ms->ram_size);
        return;
    }
}
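
/*
 * Find an address for a new memory device of @size bytes within the device
 * memory region. If @hint is given, only that address is considered;
 * otherwise the first suitably aligned gap between already plugged memory
 * devices that is large enough is used. Sets @errp on failure.
 */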
static uint64_t memory_device_get_free_addr(MachineState *ms,
                                            const uint64_t *hint,
                                            uint64_t align, uint64_t size,
                                            Error **errp)
{
    Error *err = NULL;
    GSList *list = NULL, *item;
    Range as, new = range_empty;

    if (!ms->device_memory) {
        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
                         "supported by the machine");
        return 0;
    }

    if (!memory_region_size(&ms->device_memory->mr)) {
        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
                         "enabled, please specify the maxmem option");
        return 0;
    }
    range_init_nofail(&as, ms->device_memory->base,
                      memory_region_size(&ms->device_memory->mr));

    /* start of address space indicates the maximum alignment we expect */
    if (!QEMU_IS_ALIGNED(range_lob(&as), align)) {
        warn_report("the alignment (0x%" PRIx64 ") exceeds the expected"
                    " maximum alignment, memory will get fragmented and not"
                    " all 'maxmem' might be usable for memory devices.",
                    align);
    }

    memory_device_check_addable(ms, size, &err);
    if (err) {
        error_propagate(errp, err);
        return 0;
    }

    if (hint && !QEMU_IS_ALIGNED(*hint, align)) {
        error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes",
                   align);
        return 0;
    }

    if (!QEMU_IS_ALIGNED(size, align)) {
        error_setg(errp, "backend memory size must be multiple of 0x%"
                   PRIx64, align);
        return 0;
    }

    if (hint) {
        if (range_init(&new, *hint, size) || !range_contains_range(&as, &new)) {
            error_setg(errp, "can't add memory device [0x%" PRIx64 ":0x%" PRIx64
                       "], usable range for memory devices [0x%" PRIx64 ":0x%"
                       PRIx64 "]", *hint, size, range_lob(&as),
                       range_size(&as));
            return 0;
        }
    } else {
        if (range_init(&new, QEMU_ALIGN_UP(range_lob(&as), align), size)) {
            error_setg(errp, "can't add memory device, device too big");
            return 0;
        }
    }

    /* find address range that will fit new memory device */
    object_child_foreach(OBJECT(ms), memory_device_build_list, &list);
    for (item = list; item; item = g_slist_next(item)) {
        const MemoryDeviceState *md = item->data;
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(OBJECT(md));
        uint64_t next_addr;
        Range tmp;

        range_init_nofail(&tmp, mdc->get_addr(md),
                          memory_device_get_region_size(md, &error_abort));

        if (range_overlaps_range(&tmp, &new)) {
            if (hint) {
                const DeviceState *d = DEVICE(md);
                error_setg(errp, "address range conflicts with memory device"
                           " id='%s'", d->id ? d->id : "(unnamed)");
                goto out;
            }

            next_addr = QEMU_ALIGN_UP(range_upb(&tmp) + 1, align);
            if (!next_addr || range_init(&new, next_addr, range_size(&new))) {
                range_make_empty(&new);
                break;
            }
        } else if (range_lob(&tmp) > range_upb(&new)) {
            break;
        }
    }

    if (!range_contains_range(&as, &new)) {
        error_setg(errp, "could not find position in guest address space for "
                   "memory device - memory fragmented due to alignments");
    }
out:
    g_slist_free(list);
    return range_lob(&new);
}
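
/*
 * Build a MemoryDeviceInfoList describing all realized memory devices, as
 * used e.g. for the QMP 'query-memory-devices' command.
 */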
MemoryDeviceInfoList *qmp_memory_device_list(void)
{
    GSList *devices = NULL, *item;
    MemoryDeviceInfoList *list = NULL, *prev = NULL;

    object_child_foreach(qdev_get_machine(), memory_device_build_list,
                         &devices);

    for (item = devices; item; item = g_slist_next(item)) {
        const MemoryDeviceState *md = MEMORY_DEVICE(item->data);
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(item->data);
        MemoryDeviceInfoList *elem = g_new0(MemoryDeviceInfoList, 1);
        MemoryDeviceInfo *info = g_new0(MemoryDeviceInfo, 1);

        mdc->fill_device_info(md, info);

        elem->value = info;
        elem->next = NULL;
        if (prev) {
            prev->next = elem;
        } else {
            list = elem;
        }
        prev = elem;
    }

    g_slist_free(devices);

    return list;
}
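
/*
 * object_child_foreach() callback: add up the plugged size of all realized
 * memory devices below @obj into the uint64_t pointed to by @opaque.
 */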
static int memory_device_plugged_size(Object *obj, void *opaque)
{
    uint64_t *size = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        const DeviceState *dev = DEVICE(obj);
        const MemoryDeviceState *md = MEMORY_DEVICE(obj);
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);

        if (dev->realized) {
            *size += mdc->get_plugged_size(md, &error_abort);
        }
    }

    object_child_foreach(obj, memory_device_plugged_size, opaque);
    return 0;
}
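
/* Return the total size of memory currently plugged via memory devices. */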
uint64_t get_plugged_memory_size(void)
{
    uint64_t size = 0;

    memory_device_plugged_size(qdev_get_machine(), &size);

    return size;
}
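
/*
 * Pre-plug handler: resolve the device's memory region, determine the
 * required alignment (@legacy_align if given, otherwise the device/region
 * minimum), pick or validate an address in device memory and store it via
 * set_addr(). Sets @errp on failure.
 */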
void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms,
                            const uint64_t *legacy_align, Error **errp)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    Error *local_err = NULL;
    uint64_t addr, align = 0;
    MemoryRegion *mr;

    mr = mdc->get_memory_region(md, &local_err);
    if (local_err) {
        goto out;
    }

    if (legacy_align) {
        align = *legacy_align;
    } else {
        if (mdc->get_min_alignment) {
            align = mdc->get_min_alignment(md);
        }
        align = MAX(align, memory_region_get_alignment(mr));
    }
    addr = mdc->get_addr(md);
    addr = memory_device_get_free_addr(ms, !addr ? NULL : &addr, align,
                                       memory_region_size(mr), &local_err);
    if (local_err) {
        goto out;
    }
    mdc->set_addr(md, addr, &local_err);
    if (!local_err) {
        trace_memory_device_pre_plug(DEVICE(md)->id ? DEVICE(md)->id : "",
                                     addr);
    }
out:
    error_propagate(errp, local_err);
}
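
/*
 * Plug handler: map the device's memory region into the machine's device
 * memory at the address assigned during pre-plug.
 */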
void memory_device_plug(MemoryDeviceState *md, MachineState *ms)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    const uint64_t addr = mdc->get_addr(md);
    MemoryRegion *mr;

    /*
     * We expect that a previous call to memory_device_pre_plug() succeeded, so
     * it can't fail at this point.
     */
    mr = mdc->get_memory_region(md, &error_abort);
    g_assert(ms->device_memory);

    memory_region_add_subregion(&ms->device_memory->mr,
                                addr - ms->device_memory->base, mr);
    trace_memory_device_plug(DEVICE(md)->id ? DEVICE(md)->id : "", addr);
}
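
/*
 * Unplug handler: remove the device's memory region from the machine's
 * device memory again.
 */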
void memory_device_unplug(MemoryDeviceState *md, MachineState *ms)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    MemoryRegion *mr;

    /*
     * We expect that a previous call to memory_device_pre_plug() succeeded, so
     * it can't fail at this point.
     */
    mr = mdc->get_memory_region(md, &error_abort);
    g_assert(ms->device_memory);

    memory_region_del_subregion(&ms->device_memory->mr, mr);
    trace_memory_device_unplug(DEVICE(md)->id ? DEVICE(md)->id : "",
                               mdc->get_addr(md));
}
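
/*
 * Return the size of the device's memory region in bytes, or 0 (with @errp
 * set) if the region cannot be resolved.
 */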
uint64_t memory_device_get_region_size(const MemoryDeviceState *md,
                                       Error **errp)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    MemoryRegion *mr;

    /* dropping const here is fine as we don't touch the memory region */
    mr = mdc->get_memory_region((MemoryDeviceState *)md, errp);
    if (!mr) {
        return 0;
    }

    return memory_region_size(mr);
}

static const TypeInfo memory_device_info = {
    .name          = TYPE_MEMORY_DEVICE,
    .parent        = TYPE_INTERFACE,
    .class_size    = sizeof(MemoryDeviceClass),
};

static void memory_device_register_types(void)
{
    type_register_static(&memory_device_info);
}

type_init(memory_device_register_types)