/*
 * NUMA parameter parsing routines
 *
 * Copyright (c) 2014 Fujitsu Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "sysemu/numa.h"
#include "exec/cpu-common.h"
#include "qemu/bitmap.h"
#include "qom/cpu.h"
#include "qemu/error-report.h"
#include "include/exec/cpu-common.h" /* for RAM_ADDR_FMT */
#include "qapi-visit.h"
#include "qapi/opts-visitor.h"
#include "hw/boards.h"
#include "sysemu/hostmem.h"
#include "qmp-commands.h"
#include "hw/mem/pc-dimm.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
QemuOptsList qemu_numa_opts = {
    .name = "numa",
    .implied_opt_name = "type",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};
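/* Illustrative command-line usage parsed through the opts list above
 * (the values here are made up):
 *
 *   -numa node,nodeid=0,cpus=0-3,mem=2048
 *   -numa node,nodeid=1,cpus=4-7,memdev=ram1
 *
 * "type" is the implied option name, so the leading "node" selects
 * NUMA_OPTIONS_KIND_NODE in parse_numa() below.
 */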
static int have_memdevs = -1;
static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
                             * For all nodes, nodeid < max_numa_nodeid
                             */
int nb_numa_nodes;
NodeInfo numa_info[MAX_NODES];
void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
{
    struct numa_addr_range *range;

    /*
     * Memory-less nodes can come here with 0 size in which case,
     * there is nothing to do.
     */
    if (!size) {
        return;
    }

    range = g_malloc0(sizeof(*range));
    range->mem_start = addr;
    range->mem_end = addr + size - 1;
    QLIST_INSERT_HEAD(&numa_info[node].addr, range, entry);
}
void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
{
    struct numa_addr_range *range, *next;

    QLIST_FOREACH_SAFE(range, &numa_info[node].addr, entry, next) {
        if (addr == range->mem_start && (addr + size - 1) == range->mem_end) {
            QLIST_REMOVE(range, entry);
            g_free(range);
            return;
        }
    }
}
static void numa_set_mem_ranges(void)
{
    int i;
    ram_addr_t mem_start = 0;

    /*
     * Deduce start address of each node and use it to store
     * the address range info in numa_info address range list
     */
    for (i = 0; i < nb_numa_nodes; i++) {
        numa_set_mem_node_id(mem_start, numa_info[i].node_mem, i);
        mem_start += numa_info[i].node_mem;
    }
}
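/* Worked example (assumed sizes): with two nodes of 512 MiB each,
 * node 0 is registered as [0, 512 MiB - 1] and node 1 as
 * [512 MiB, 1 GiB - 1], since each node starts where the previous
 * one ends.
 */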
/*
 * Check if @addr falls under NUMA @node.
 */
static bool numa_addr_belongs_to_node(ram_addr_t addr, uint32_t node)
{
    struct numa_addr_range *range;

    QLIST_FOREACH(range, &numa_info[node].addr, entry) {
        if (addr >= range->mem_start && addr <= range->mem_end) {
            return true;
        }
    }
    return false;
}
/*
 * Given an address, return the index of the NUMA node to which the
 * address belongs.
 */
uint32_t numa_get_node(ram_addr_t addr, Error **errp)
{
    uint32_t i;

    /* For non-NUMA configurations, check if the addr falls under node 0 */
    if (!nb_numa_nodes) {
        if (numa_addr_belongs_to_node(addr, 0)) {
            return 0;
        }
    }

    for (i = 0; i < nb_numa_nodes; i++) {
        if (numa_addr_belongs_to_node(addr, i)) {
            return i;
        }
    }

    error_setg(errp, "Address 0x" RAM_ADDR_FMT " doesn't belong to any "
               "NUMA node", addr);
    return -1;
}
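/* Hypothetical caller sketch (not from this file): resolving the node
 * that owns a guest physical address, with explicit error handling.
 *
 *   Error *local_err = NULL;
 *   uint32_t node = numa_get_node(addr, &local_err);
 *   if (local_err) {
 *       error_report_err(local_err);   // addr is outside every range
 *   }
 *
 * Note that the -1 return value wraps to UINT32_MAX because of the
 * unsigned return type, so callers must check @errp rather than the value.
 */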
static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp)
{
    uint16_t nodenr;
    uint16List *cpus = NULL;

    if (node->has_nodeid) {
        nodenr = node->nodeid;
    } else {
        nodenr = nb_numa_nodes;
    }

    if (nodenr >= MAX_NODES) {
        error_setg(errp, "Max number of NUMA nodes reached: %"
                   PRIu16 "", nodenr);
        return;
    }

    if (numa_info[nodenr].present) {
        error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr);
        return;
    }

    for (cpus = node->cpus; cpus; cpus = cpus->next) {
        if (cpus->value >= max_cpus) {
            error_setg(errp,
                       "CPU index (%" PRIu16 ")"
                       " should be smaller than maxcpus (%d)",
                       cpus->value, max_cpus);
            return;
        }
        bitmap_set(numa_info[nodenr].node_cpu, cpus->value, 1);
    }

    if (node->has_mem && node->has_memdev) {
        error_setg(errp, "qemu: cannot specify both mem= and memdev=");
        return;
    }

    if (have_memdevs == -1) {
        have_memdevs = node->has_memdev;
    }
    if (node->has_memdev != have_memdevs) {
        error_setg(errp, "qemu: memdev option must be specified for either "
                   "all or no nodes");
        return;
    }

    if (node->has_mem) {
        uint64_t mem_size = node->mem;
        const char *mem_str = qemu_opt_get(opts, "mem");
        /* Fix up legacy suffix-less format */
        if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) {
            mem_size <<= 20;
        }
        numa_info[nodenr].node_mem = mem_size;
    }
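    /* Worked example for the legacy fixup above (assumed values):
     * "mem=512" ends in a digit, so 512 is shifted left by 20 bits and
     * becomes 512 MiB (536870912 bytes); "mem=512M" already carries a
     * suffix, is scaled by the options visitor, and passes through
     * unchanged.
     */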
    if (node->has_memdev) {
        Object *o;
        o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL);
        if (!o) {
            error_setg(errp, "memdev=%s is ambiguous", node->memdev);
            return;
        }

        object_ref(o);
        numa_info[nodenr].node_mem = object_property_get_int(o, "size", NULL);
        numa_info[nodenr].node_memdev = MEMORY_BACKEND(o);
    }
    numa_info[nodenr].present = true;
    max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
}
static int parse_numa(void *opaque, QemuOpts *opts, Error **errp)
{
    NumaOptions *object = NULL;
    Error *err = NULL;

    {
        Visitor *v = opts_visitor_new(opts);
        visit_type_NumaOptions(v, NULL, &object, &err);
        visit_free(v);
    }

    if (err) {
        goto end;
    }

    switch (object->type) {
    case NUMA_OPTIONS_KIND_NODE:
        numa_node_parse(object->u.node.data, opts, &err);
        if (err) {
            goto end;
        }
        nb_numa_nodes++;
        break;
    default:
        abort();
    }

end:
    qapi_free_NumaOptions(object);
    if (err) {
        error_report_err(err);
        return -1;
    }

    return 0;
}
static char *enumerate_cpus(unsigned long *cpus, int max_cpus)
{
    int cpu;
    bool first = true;
    GString *s = g_string_new(NULL);

    for (cpu = find_first_bit(cpus, max_cpus);
         cpu < max_cpus;
         cpu = find_next_bit(cpus, max_cpus, cpu + 1)) {
        g_string_append_printf(s, "%s%d", first ? "" : " ", cpu);
        first = false;
    }
    return g_string_free(s, FALSE);
}
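/* Example (assumed input): for a bitmap with bits 0, 2 and 3 set and
 * max_cpus = 4, the returned string is "0 2 3".
 */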
static void validate_numa_cpus(void)
{
    int i;
    DECLARE_BITMAP(seen_cpus, MAX_CPUMASK_BITS);

    bitmap_zero(seen_cpus, MAX_CPUMASK_BITS);
    for (i = 0; i < nb_numa_nodes; i++) {
        if (bitmap_intersects(seen_cpus, numa_info[i].node_cpu,
                              MAX_CPUMASK_BITS)) {
            bitmap_and(seen_cpus, seen_cpus,
                       numa_info[i].node_cpu, MAX_CPUMASK_BITS);
            error_report("CPU(s) present in multiple NUMA nodes: %s",
                         enumerate_cpus(seen_cpus, max_cpus));
            exit(EXIT_FAILURE);
        }
        bitmap_or(seen_cpus, seen_cpus,
                  numa_info[i].node_cpu, MAX_CPUMASK_BITS);
    }

    if (!bitmap_full(seen_cpus, max_cpus)) {
        char *msg;
        bitmap_complement(seen_cpus, seen_cpus, max_cpus);
        msg = enumerate_cpus(seen_cpus, max_cpus);
        error_report("warning: CPU(s) not present in any NUMA nodes: %s", msg);
        error_report("warning: All CPU(s) up to maxcpus should be described "
                     "in NUMA config");
        g_free(msg);
    }
}
void parse_numa_opts(MachineClass *mc)
{
    int i;

    if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, NULL, NULL)) {
        exit(1);
    }

    assert(max_numa_nodeid <= MAX_NODES);

    /* No support for sparse NUMA node IDs yet: */
    for (i = max_numa_nodeid - 1; i >= 0; i--) {
        /* Report large node IDs first, to make mistakes easier to spot */
        if (!numa_info[i].present) {
            error_report("numa: Node ID missing: %d", i);
            exit(1);
        }
    }

    /* This must always be true if all nodes are present: */
    assert(nb_numa_nodes == max_numa_nodeid);

    if (nb_numa_nodes > 0) {
        uint64_t numa_total;

        if (nb_numa_nodes > MAX_NODES) {
            nb_numa_nodes = MAX_NODES;
        }

        /* If no memory size is given for any node, assume the default case
         * and distribute the available memory equally across all nodes
         */
        for (i = 0; i < nb_numa_nodes; i++) {
            if (numa_info[i].node_mem != 0) {
                break;
            }
        }
        if (i == nb_numa_nodes) {
            uint64_t usedmem = 0;

            /* On Linux, each node's border has to be 8MB aligned,
             * the final node gets the rest.
             */
            for (i = 0; i < nb_numa_nodes - 1; i++) {
                numa_info[i].node_mem = (ram_size / nb_numa_nodes) &
                                        ~((1 << 23UL) - 1);
                usedmem += numa_info[i].node_mem;
            }
            numa_info[i].node_mem = ram_size - usedmem;
        }
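        /* Worked example for the split above (assumed values):
         * ram_size = 1 GiB, nb_numa_nodes = 3. 1 GiB / 3 = 357913941
         * bytes, aligned down to 8 MiB gives 352321536 bytes (336 MiB)
         * for nodes 0 and 1; node 2 gets the remainder, 369098752 bytes
         * (352 MiB).
         */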
        numa_total = 0;
        for (i = 0; i < nb_numa_nodes; i++) {
            numa_total += numa_info[i].node_mem;
        }
        if (numa_total != ram_size) {
            error_report("total memory for NUMA nodes (0x%" PRIx64 ")"
                         " should equal RAM size (0x" RAM_ADDR_FMT ")",
                         numa_total, ram_size);
            exit(1);
        }

        for (i = 0; i < nb_numa_nodes; i++) {
            QLIST_INIT(&numa_info[i].addr);
        }

        numa_set_mem_ranges();

        for (i = 0; i < nb_numa_nodes; i++) {
            if (!bitmap_empty(numa_info[i].node_cpu, MAX_CPUMASK_BITS)) {
                break;
            }
        }
        /* Historically, VCPUs were assigned to NUMA nodes in round-robin
         * order. However, that causes issues when guests do not cope well
         * with cores/threads from one multicore CPU appearing on different
         * nodes. So allow boards to override the default distribution rule
         * by grouping VCPUs by socket, so that VCPUs from the same socket
         * end up on the same node (see the worked example below).
         */
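        /* Worked example (assumed topology): with 8 VCPUs, 2 nodes and a
         * board mapping 4 cores per socket, plain round-robin would put
         * VCPUs 0,2,4,6 on node 0 and 1,3,5,7 on node 1, whereas
         * cpu_index_to_socket_id(i) = i / 4 keeps VCPUs 0-3 on node 0
         * and 4-7 on node 1.
         */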
        if (i == nb_numa_nodes) {
            for (i = 0; i < max_cpus; i++) {
                unsigned node_id = i % nb_numa_nodes;
                if (mc->cpu_index_to_socket_id) {
                    node_id = mc->cpu_index_to_socket_id(i) % nb_numa_nodes;
                }
                set_bit(i, numa_info[node_id].node_cpu);
            }
        }

        validate_numa_cpus();
    } else {
        numa_set_mem_node_id(0, ram_size, 0);
    }
}
void numa_post_machine_init(void)
{
    CPUState *cpu;
    int i;

    CPU_FOREACH(cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, numa_info[i].node_cpu)) {
                cpu->numa_node = i;
            }
        }
    }
}
static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
                                           const char *name,
                                           uint64_t ram_size)
{
    if (mem_path) {
#ifdef __linux__
        Error *err = NULL;
        memory_region_init_ram_from_file(mr, owner, name, ram_size, false,
                                         mem_path, &err);
        if (err) {
            error_report_err(err);
            if (mem_prealloc) {
                exit(1);
            }

            /* Legacy behavior: if allocation failed, fall back to
             * regular RAM allocation.
             */
            memory_region_init_ram(mr, owner, name, ram_size, &error_fatal);
        }
#else
        fprintf(stderr, "-mem-path not supported on this host\n");
        exit(1);
#endif
    } else {
        memory_region_init_ram(mr, owner, name, ram_size, &error_fatal);
    }
    vmstate_register_ram_global(mr);
}
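/* Illustrative host-side invocation exercising the Linux branch above
 * (paths are examples only):
 *
 *   qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages -mem-prealloc ...
 *
 * With -mem-prealloc a failed file-backed allocation is fatal; without
 * it the code falls back to anonymous RAM as a legacy behavior.
 */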
void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
                                          const char *name,
                                          uint64_t ram_size)
{
    uint64_t addr = 0;
    int i;

    if (nb_numa_nodes == 0 || !have_memdevs) {
        allocate_system_memory_nonnuma(mr, owner, name, ram_size);
        return;
    }

    memory_region_init(mr, owner, name, ram_size);
    for (i = 0; i < MAX_NODES; i++) {
        uint64_t size = numa_info[i].node_mem;
        HostMemoryBackend *backend = numa_info[i].node_memdev;
        if (!backend) {
            continue;
        }
        MemoryRegion *seg = host_memory_backend_get_memory(backend,
                                                           &error_fatal);

        if (memory_region_is_mapped(seg)) {
            char *path = object_get_canonical_path_component(OBJECT(backend));
            error_report("memory backend %s is used multiple times. Each "
                         "-numa option must use a different memdev value.",
                         path);
            exit(1);
        }

        host_memory_backend_set_mapped(backend, true);
        memory_region_add_subregion(mr, addr, seg);
        vmstate_register_ram_global(seg);
        addr += size;
    }
}
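/* Illustrative configuration reaching the per-node path above (IDs and
 * sizes are made up):
 *
 *   -object memory-backend-ram,id=ram0,size=512M
 *   -object memory-backend-ram,id=ram1,size=512M
 *   -numa node,nodeid=0,memdev=ram0 -numa node,nodeid=1,memdev=ram1
 *
 * Each backend's memory region becomes a subregion of @mr at the
 * running @addr offset, so guest RAM is stitched together node by node.
 */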
static void numa_stat_memory_devices(uint64_t node_mem[])
{
    MemoryDeviceInfoList *info_list = NULL;
    MemoryDeviceInfoList **prev = &info_list;
    MemoryDeviceInfoList *info;

    qmp_pc_dimm_device_list(qdev_get_machine(), &prev);
    for (info = info_list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value) {
            switch (value->type) {
            case MEMORY_DEVICE_INFO_KIND_DIMM:
                node_mem[value->u.dimm.data->node] += value->u.dimm.data->size;
                break;
            default:
                break;
            }
        }
    }
    qapi_free_MemoryDeviceInfoList(info_list);
}
void query_numa_node_mem(uint64_t node_mem[])
{
    int i;

    if (nb_numa_nodes <= 0) {
        return;
    }

    numa_stat_memory_devices(node_mem);
    for (i = 0; i < nb_numa_nodes; i++) {
        node_mem[i] += numa_info[i].node_mem;
    }
}
static int query_memdev(Object *obj, void *opaque)
{
    MemdevList **list = opaque;
    MemdevList *m = NULL;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        m = g_malloc0(sizeof(*m));

        m->value = g_malloc0(sizeof(*m->value));

        m->value->size = object_property_get_int(obj, "size",
                                                 &error_abort);
        m->value->merge = object_property_get_bool(obj, "merge",
                                                   &error_abort);
        m->value->dump = object_property_get_bool(obj, "dump",
                                                  &error_abort);
        m->value->prealloc = object_property_get_bool(obj,
                                                      "prealloc",
                                                      &error_abort);
        m->value->policy = object_property_get_enum(obj,
                                                    "policy",
                                                    "HostMemPolicy",
                                                    &error_abort);
        object_property_get_uint16List(obj, "host-nodes",
                                       &m->value->host_nodes,
                                       &error_abort);

        m->next = *list;
        *list = m;
    }

    return 0;
}
MemdevList *qmp_query_memdev(Error **errp)
{
    Object *obj = object_get_objects_root();
    MemdevList *list = NULL;

    object_child_foreach(obj, query_memdev, &list);
    return list;
}
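/* Example QMP exchange served by qmp_query_memdev() (reply abridged and
 * field values assumed):
 *
 *   -> { "execute": "query-memdev" }
 *   <- { "return": [ { "size": 536870912, "merge": true, "dump": true,
 *                      "prealloc": false, "policy": "default",
 *                      "host-nodes": [] } ] }
 */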
int numa_get_node_for_cpu(int idx)
{
    int i;

    for (i = 0; i < nb_numa_nodes; i++) {
        if (test_bit(idx, numa_info[i].node_cpu)) {
            break;
        }
    }
    return i;
}