/*
 * NUMA parameter parsing routines
 *
 * Copyright (c) 2014 Fujitsu Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "exec/cpu-common.h"
#include "exec/ramlist.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-visit-machine.h"
#include "sysemu/qtest.h"
#include "hw/core/cpu.h"
#include "hw/mem/pc-dimm.h"
#include "migration/vmstate.h"
#include "hw/boards.h"
#include "hw/mem/memory-device.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/cutils.h"
QemuOptsList qemu_numa_opts = {
    .name = "numa",
    .implied_opt_name = "type",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};

static int have_memdevs;
bool numa_uses_legacy_mem(void)
{
    return !have_memdevs;
}

static int have_mem;
static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
                             * For all nodes, nodeid < max_numa_nodeid
                             */
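
/*
 * Illustrative command lines handled by the parsers below (a sketch based
 * on the option names used in this file; the exact accepted syntax is
 * defined by the NumaOptions QAPI schema):
 *
 *   -numa node,nodeid=0,cpus=0-3,memdev=m0
 *   -numa dist,src=0,dst=1,val=20
 *   -numa cpu,node-id=0,socket-id=0
 *   -numa hmat-lb,initiator=0,target=0,hierarchy=memory,\
 *         data-type=access-latency,latency=5
 *   -numa hmat-cache,node-id=0,size=10K,level=1,associativity=direct,\
 *         policy=write-back,line=8
 */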
static void parse_numa_node(MachineState *ms, NumaNodeOptions *node,
                            Error **errp)
{
    Error *err = NULL;
    uint16_t nodenr;
    uint16List *cpus = NULL;
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    unsigned int max_cpus = ms->smp.max_cpus;
    NodeInfo *numa_info = ms->numa_state->nodes;

    if (node->has_nodeid) {
        nodenr = node->nodeid;
    } else {
        nodenr = ms->numa_state->num_nodes;
    }

    if (nodenr >= MAX_NODES) {
        error_setg(errp, "Max number of NUMA nodes reached: %"
                   PRIu16 "", nodenr);
        return;
    }

    if (numa_info[nodenr].present) {
        error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr);
        return;
    }

    /*
     * If not set the initiator, set it to MAX_NODES. And if
     * HMAT is enabled and this node has no cpus, QEMU will raise error.
     */
    numa_info[nodenr].initiator = MAX_NODES;
    if (node->has_initiator) {
        if (!ms->numa_state->hmat_enabled) {
            error_setg(errp, "ACPI Heterogeneous Memory Attribute Table "
                       "(HMAT) is disabled, enable it with -machine hmat=on "
                       "before using any of hmat specific options");
            return;
        }

        if (node->initiator >= MAX_NODES) {
            error_report("The initiator id %" PRIu16 " expects an integer "
                         "between 0 and %d", node->initiator,
                         MAX_NODES - 1);
            return;
        }

        numa_info[nodenr].initiator = node->initiator;
    }

    for (cpus = node->cpus; cpus; cpus = cpus->next) {
        CpuInstanceProperties props;
        if (cpus->value >= max_cpus) {
            error_setg(errp,
                       "CPU index (%" PRIu16 ")"
                       " should be smaller than maxcpus (%d)",
                       cpus->value, max_cpus);
            return;
        }
        props = mc->cpu_index_to_instance_props(ms, cpus->value);
        props.node_id = nodenr;
        props.has_node_id = true;
        machine_set_cpu_numa_node(ms, &props, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }

    have_memdevs = have_memdevs || node->memdev;
    have_mem = have_mem || node->has_mem;
    if ((node->has_mem && have_memdevs) || (node->memdev && have_mem)) {
        error_setg(errp, "numa configuration should use either mem= or memdev=,"
                   "mixing both is not allowed");
        return;
    }

    if (node->has_mem) {
        if (!mc->numa_mem_supported) {
            error_setg(errp, "Parameter -numa node,mem is not supported by this"
                       " machine type");
            error_append_hint(errp, "Use -numa node,memdev instead\n");
            return;
        }

        numa_info[nodenr].node_mem = node->mem;
        if (!qtest_enabled()) {
            warn_report("Parameter -numa node,mem is deprecated,"
                        " use -numa node,memdev instead");
        }
    }
    if (node->memdev) {
        Object *o;
        o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL);
        if (!o) {
            error_setg(errp, "memdev=%s is ambiguous", node->memdev);
            return;
        }

        object_ref(o);
        numa_info[nodenr].node_mem = object_property_get_uint(o, "size", NULL);
        numa_info[nodenr].node_memdev = MEMORY_BACKEND(o);
    }

    numa_info[nodenr].present = true;
    max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
    ms->numa_state->num_nodes++;
}
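
/*
 * Example (illustrative): "-numa node,nodeid=0,memdev=m0" binds node 0 to
 * a host memory backend such as "-object memory-backend-ram,id=m0,size=2G",
 * while the legacy "-numa node,mem=2G" form only records a size and is
 * rejected above on machine types without numa_mem_supported.
 */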
void parse_numa_distance(MachineState *ms, NumaDistOptions *dist, Error **errp)
{
    uint16_t src = dist->src;
    uint16_t dst = dist->dst;
    uint8_t val = dist->val;
    NodeInfo *numa_info = ms->numa_state->nodes;

    if (src >= MAX_NODES || dst >= MAX_NODES) {
        error_setg(errp, "Parameter '%s' expects an integer between 0 and %d",
                   src >= MAX_NODES ? "src" : "dst", MAX_NODES - 1);
        return;
    }

    if (!numa_info[src].present || !numa_info[dst].present) {
        error_setg(errp, "Source/Destination NUMA node is missing. "
                   "Please use '-numa node' option to declare it first.");
        return;
    }

    if (val < NUMA_DISTANCE_MIN) {
        error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, "
                   "it shouldn't be less than %d.",
                   val, NUMA_DISTANCE_MIN);
        return;
    }

    if (src == dst && val != NUMA_DISTANCE_MIN) {
        error_setg(errp, "Local distance of node %d should be %d.",
                   src, NUMA_DISTANCE_MIN);
        return;
    }

    numa_info[src].distance[dst] = val;
    ms->numa_state->have_numa_distance = true;
}
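
/*
 * Example (illustrative): "-numa dist,src=0,dst=1,val=20" stores only the
 * 0->1 entry here; the 1->0 entry stays 0 and is mirrored later by
 * complete_init_numa_distance(), unless the user supplies an asymmetric
 * value explicitly.
 */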
void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node,
                        Error **errp)
{
    int i, first_bit, last_bit;
    uint64_t max_entry, temp_base, bitmap_copy;
    NodeInfo *numa_info = numa_state->nodes;
    HMAT_LB_Info *hmat_lb =
        numa_state->hmat_lb[node->hierarchy][node->data_type];
    HMAT_LB_Data lb_data = {};
    HMAT_LB_Data *lb_temp;

    /* Error checking */
    if (node->initiator > numa_state->num_nodes) {
        error_setg(errp, "Invalid initiator=%d, it should be less than %d",
                   node->initiator, numa_state->num_nodes);
        return;
    }
    if (node->target > numa_state->num_nodes) {
        error_setg(errp, "Invalid target=%d, it should be less than %d",
                   node->target, numa_state->num_nodes);
        return;
    }
    if (!numa_info[node->initiator].has_cpu) {
        error_setg(errp, "Invalid initiator=%d, it isn't an "
                   "initiator proximity domain", node->initiator);
        return;
    }
    if (!numa_info[node->target].present) {
        error_setg(errp, "The target=%d should point to an existing node",
                   node->target);
        return;
    }

    if (!hmat_lb) {
        hmat_lb = g_malloc0(sizeof(*hmat_lb));
        numa_state->hmat_lb[node->hierarchy][node->data_type] = hmat_lb;
        hmat_lb->list = g_array_new(false, true, sizeof(HMAT_LB_Data));
    }
    hmat_lb->hierarchy = node->hierarchy;
    hmat_lb->data_type = node->data_type;
    lb_data.initiator = node->initiator;
    lb_data.target = node->target;

    if (node->data_type <= HMATLB_DATA_TYPE_WRITE_LATENCY) {
        /* Input latency data */

        if (!node->has_latency) {
            error_setg(errp, "Missing 'latency' option");
            return;
        }

        if (node->has_bandwidth) {
            error_setg(errp, "Invalid option 'bandwidth' since "
                       "the data type is latency");
            return;
        }

        /* Detect duplicate configuration */
        for (i = 0; i < hmat_lb->list->len; i++) {
            lb_temp = &g_array_index(hmat_lb->list, HMAT_LB_Data, i);

            if (node->initiator == lb_temp->initiator &&
                node->target == lb_temp->target) {
                error_setg(errp, "Duplicate configuration of the latency for "
                           "initiator=%d and target=%d", node->initiator,
                           node->target);
                return;
            }
        }

        hmat_lb->base = hmat_lb->base ? hmat_lb->base : UINT64_MAX;

        if (node->latency) {
            /* Calculate the temporary base and compressed latency */
            max_entry = node->latency;
            temp_base = 1;
            while (QEMU_IS_ALIGNED(max_entry, 10)) {
                max_entry /= 10;
                temp_base *= 10;
            }

            /* Calculate the max compressed latency */
            temp_base = MIN(hmat_lb->base, temp_base);
            max_entry = node->latency / hmat_lb->base;
            max_entry = MAX(hmat_lb->range_bitmap, max_entry);

            /*
             * For latency hmat_lb->range_bitmap record the max compressed
             * latency which should be less than 0xFFFF (UINT16_MAX)
             */
            if (max_entry >= UINT16_MAX) {
                error_setg(errp, "Latency %" PRIu64 " between initiator=%d and "
                           "target=%d should not differ from previously entered "
                           "min or max values on more than %d", node->latency,
                           node->initiator, node->target, UINT16_MAX - 1);
                return;
            } else {
                hmat_lb->base = temp_base;
                hmat_lb->range_bitmap = max_entry;
            }
        }

        /*
         * Set lb_info_provided bit 0 as 1,
         * latency information is provided
         */
        numa_info[node->target].lb_info_provided |= BIT(0);
        lb_data.data = node->latency;
    } else if (node->data_type >= HMATLB_DATA_TYPE_ACCESS_BANDWIDTH) {
        /* Input bandwidth data */
        if (!node->has_bandwidth) {
            error_setg(errp, "Missing 'bandwidth' option");
            return;
        }

        if (node->has_latency) {
            error_setg(errp, "Invalid option 'latency' since "
                       "the data type is bandwidth");
            return;
        }

        if (!QEMU_IS_ALIGNED(node->bandwidth, MiB)) {
            error_setg(errp, "Bandwidth %" PRIu64 " between initiator=%d and "
                       "target=%d should be 1MB aligned", node->bandwidth,
                       node->initiator, node->target);
            return;
        }

        /* Detect duplicate configuration */
        for (i = 0; i < hmat_lb->list->len; i++) {
            lb_temp = &g_array_index(hmat_lb->list, HMAT_LB_Data, i);

            if (node->initiator == lb_temp->initiator &&
                node->target == lb_temp->target) {
                error_setg(errp, "Duplicate configuration of the bandwidth for "
                           "initiator=%d and target=%d", node->initiator,
                           node->target);
                return;
            }
        }

        hmat_lb->base = hmat_lb->base ? hmat_lb->base : 1;

        if (node->bandwidth) {
            /* Keep bitmap unchanged when bandwidth out of range */
            bitmap_copy = hmat_lb->range_bitmap;
            bitmap_copy |= node->bandwidth;
            first_bit = ctz64(bitmap_copy);
            temp_base = UINT64_C(1) << first_bit;
            max_entry = node->bandwidth / temp_base;
            last_bit = 64 - clz64(bitmap_copy);

            /*
             * For bandwidth, first_bit record the base unit of bandwidth bits,
             * last_bit record the last bit of the max bandwidth. The max
             * compressed bandwidth should be less than 0xFFFF (UINT16_MAX)
             */
            if ((last_bit - first_bit) > UINT16_BITS ||
                max_entry >= UINT16_MAX) {
                error_setg(errp, "Bandwidth %" PRIu64 " between initiator=%d "
                           "and target=%d should not differ from previously "
                           "entered values on more than %d", node->bandwidth,
                           node->initiator, node->target, UINT16_MAX - 1);
                return;
            } else {
                hmat_lb->base = temp_base;
                hmat_lb->range_bitmap = bitmap_copy;
            }
        }

        /*
         * Set lb_info_provided bit 1 as 1,
         * bandwidth information is provided
         */
        numa_info[node->target].lb_info_provided |= BIT(1);
        lb_data.data = node->bandwidth;
    } else {
        assert(0);
    }

    g_array_append_val(hmat_lb->list, lb_data);
}
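
/*
 * Worked example (illustrative): ACPI HMAT stores each latency/bandwidth
 * value as a 16-bit entry scaled by a common base. A first "latency=100"
 * strips the trailing factors of 10 above, so the common base becomes 100;
 * any later latency that would compress to 0xFFFF or more relative to that
 * base is rejected here instead of being silently truncated.
 */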
void parse_numa_hmat_cache(MachineState *ms, NumaHmatCacheOptions *node,
                           Error **errp)
{
    int nb_numa_nodes = ms->numa_state->num_nodes;
    NodeInfo *numa_info = ms->numa_state->nodes;
    NumaHmatCacheOptions *hmat_cache = NULL;

    if (node->node_id >= nb_numa_nodes) {
        error_setg(errp, "Invalid node-id=%" PRIu32 ", it should be less "
                   "than %d", node->node_id, nb_numa_nodes);
        return;
    }

    if (numa_info[node->node_id].lb_info_provided != (BIT(0) | BIT(1))) {
        error_setg(errp, "The latency and bandwidth information of "
                   "node-id=%" PRIu32 " should be provided before memory side "
                   "cache attributes", node->node_id);
        return;
    }

    if (node->level < 1 || node->level >= HMAT_LB_LEVELS) {
        error_setg(errp, "Invalid level=%" PRIu8 ", it should be larger than 0 "
                   "and less than or equal to %d", node->level,
                   HMAT_LB_LEVELS - 1);
        return;
    }

    assert(node->associativity < HMAT_CACHE_ASSOCIATIVITY__MAX);
    assert(node->policy < HMAT_CACHE_WRITE_POLICY__MAX);
    if (ms->numa_state->hmat_cache[node->node_id][node->level]) {
        error_setg(errp, "Duplicate configuration of the side cache for "
                   "node-id=%" PRIu32 " and level=%" PRIu8,
                   node->node_id, node->level);
        return;
    }

    if ((node->level > 1) &&
        ms->numa_state->hmat_cache[node->node_id][node->level - 1] == NULL) {
        error_setg(errp, "Cache level=%u shall be defined first",
                   node->level - 1);
        return;
    }

    if ((node->level > 1) &&
        (node->size <=
            ms->numa_state->hmat_cache[node->node_id][node->level - 1]->size)) {
        error_setg(errp, "Invalid size=%" PRIu64 ", the size of level=%" PRIu8
                   " should be larger than the size(%" PRIu64 ") of "
                   "level=%u", node->size, node->level,
                   ms->numa_state->hmat_cache[node->node_id]
                                             [node->level - 1]->size,
                   node->level - 1);
        return;
    }

    if ((node->level < HMAT_LB_LEVELS - 1) &&
        ms->numa_state->hmat_cache[node->node_id][node->level + 1] &&
        (node->size >=
            ms->numa_state->hmat_cache[node->node_id][node->level + 1]->size)) {
        error_setg(errp, "Invalid size=%" PRIu64 ", the size of level=%" PRIu8
                   " should be less than the size(%" PRIu64 ") of "
                   "level=%u", node->size, node->level,
                   ms->numa_state->hmat_cache[node->node_id]
                                             [node->level + 1]->size,
                   node->level + 1);
        return;
    }

    hmat_cache = g_malloc0(sizeof(*hmat_cache));
    memcpy(hmat_cache, node, sizeof(*hmat_cache));
    ms->numa_state->hmat_cache[node->node_id][node->level] = hmat_cache;
}
void set_numa_options(MachineState *ms, NumaOptions *object, Error **errp)
{
    if (!ms->numa_state) {
        error_setg(errp, "NUMA is not supported by this machine-type");
        return;
    }

    switch (object->type) {
    case NUMA_OPTIONS_TYPE_NODE:
        parse_numa_node(ms, &object->u.node, errp);
        break;
    case NUMA_OPTIONS_TYPE_DIST:
        parse_numa_distance(ms, &object->u.dist, errp);
        break;
    case NUMA_OPTIONS_TYPE_CPU:
        if (!object->u.cpu.has_node_id) {
            error_setg(errp, "Missing mandatory node-id property");
            return;
        }
        if (!ms->numa_state->nodes[object->u.cpu.node_id].present) {
            error_setg(errp, "Invalid node-id=%" PRId64 ", NUMA node must be "
                       "defined with -numa node,nodeid=ID before it's used with "
                       "-numa cpu,node-id=ID", object->u.cpu.node_id);
            return;
        }

        machine_set_cpu_numa_node(ms,
                                  qapi_NumaCpuOptions_base(&object->u.cpu),
                                  errp);
        break;
    case NUMA_OPTIONS_TYPE_HMAT_LB:
        if (!ms->numa_state->hmat_enabled) {
            error_setg(errp, "ACPI Heterogeneous Memory Attribute Table "
                       "(HMAT) is disabled, enable it with -machine hmat=on "
                       "before using any of hmat specific options");
            return;
        }

        parse_numa_hmat_lb(ms->numa_state, &object->u.hmat_lb, errp);
        break;
    case NUMA_OPTIONS_TYPE_HMAT_CACHE:
        if (!ms->numa_state->hmat_enabled) {
            error_setg(errp, "ACPI Heterogeneous Memory Attribute Table "
                       "(HMAT) is disabled, enable it with -machine hmat=on "
                       "before using any of hmat specific options");
            return;
        }

        parse_numa_hmat_cache(ms, &object->u.hmat_cache, errp);
        break;
    default:
        abort();
    }
}
static int parse_numa(void *opaque, QemuOpts *opts, Error **errp)
{
    NumaOptions *object = NULL;
    MachineState *ms = MACHINE(opaque);
    Error *err = NULL;
    Visitor *v = opts_visitor_new(opts);

    visit_type_NumaOptions(v, NULL, &object, errp);
    visit_free(v);
    if (!object) {
        return -1;
    }

    /* Fix up legacy suffix-less format */
    if ((object->type == NUMA_OPTIONS_TYPE_NODE) && object->u.node.has_mem) {
        const char *mem_str = qemu_opt_get(opts, "mem");
        qemu_strtosz_MiB(mem_str, NULL, &object->u.node.mem);
    }

    set_numa_options(ms, object, &err);

    qapi_free_NumaOptions(object);
    if (err) {
        error_propagate(errp, err);
        return -1;
    }

    return 0;
}
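
/*
 * Note (illustrative): the fixup above re-parses a bare "mem=128" with a
 * default MiB suffix via qemu_strtosz_MiB(), so legacy suffix-less
 * command lines keep their historical mebibyte meaning.
 */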
/* If all node pair distances are symmetric, then only distances
 * in one direction are enough. If there is even one asymmetric
 * pair, though, then all distances must be provided. The
 * distance from a node to itself is always NUMA_DISTANCE_MIN,
 * so providing it is never necessary.
 */
static void validate_numa_distance(MachineState *ms)
{
    int src, dst;
    bool is_asymmetrical = false;
    int nb_numa_nodes = ms->numa_state->num_nodes;
    NodeInfo *numa_info = ms->numa_state->nodes;

    for (src = 0; src < nb_numa_nodes; src++) {
        for (dst = src; dst < nb_numa_nodes; dst++) {
            if (numa_info[src].distance[dst] == 0 &&
                numa_info[dst].distance[src] == 0) {
                if (src != dst) {
                    error_report("The distance between node %d and %d is "
                                 "missing, at least one distance value "
                                 "between each nodes should be provided.",
                                 src, dst);
                    exit(EXIT_FAILURE);
                }
            }

            if (numa_info[src].distance[dst] != 0 &&
                numa_info[dst].distance[src] != 0 &&
                numa_info[src].distance[dst] !=
                numa_info[dst].distance[src]) {
                is_asymmetrical = true;
            }
        }
    }

    if (is_asymmetrical) {
        for (src = 0; src < nb_numa_nodes; src++) {
            for (dst = 0; dst < nb_numa_nodes; dst++) {
                if (src != dst && numa_info[src].distance[dst] == 0) {
                    error_report("At least one asymmetrical pair of "
                                 "distances is given, please provide distances "
                                 "for both directions of all node pairs.");
                    exit(EXIT_FAILURE);
                }
            }
        }
    }
}
static void complete_init_numa_distance(MachineState *ms)
{
    int src, dst;
    NodeInfo *numa_info = ms->numa_state->nodes;

    /* Fixup NUMA distance by symmetric policy because if it is an
     * asymmetric distance table, it should be a complete table and
     * there would not be any missing distance except local node, which
     * is verified by validate_numa_distance above.
     */
    for (src = 0; src < ms->numa_state->num_nodes; src++) {
        for (dst = 0; dst < ms->numa_state->num_nodes; dst++) {
            if (numa_info[src].distance[dst] == 0) {
                if (src == dst) {
                    numa_info[src].distance[dst] = NUMA_DISTANCE_MIN;
                } else {
                    numa_info[src].distance[dst] = numa_info[dst].distance[src];
                }
            }
        }
    }
}
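
/*
 * Example (illustrative): given only "-numa dist,src=0,dst=1,val=20", the
 * loop above sets distance[0][0] = distance[1][1] = NUMA_DISTANCE_MIN (10)
 * and mirrors distance[1][0] = 20 from distance[0][1].
 */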
static void numa_init_memdev_container(MachineState *ms, MemoryRegion *ram)
{
    int i;
    uint64_t addr = 0;

    for (i = 0; i < ms->numa_state->num_nodes; i++) {
        uint64_t size = ms->numa_state->nodes[i].node_mem;
        HostMemoryBackend *backend = ms->numa_state->nodes[i].node_memdev;
        if (!backend) {
            continue;
        }
        MemoryRegion *seg = machine_consume_memdev(ms, backend);
        memory_region_add_subregion(ram, addr, seg);
        addr += size;
    }
}
void numa_complete_configuration(MachineState *ms)
{
    int i;
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    NodeInfo *numa_info = ms->numa_state->nodes;

    /*
     * If memory hotplug is enabled (slot > 0) or memory devices are enabled
     * (ms->maxram_size > ms->ram_size) but without '-numa' options explicitly on
     * CLI, guests will break.
     *
     *   Windows: won't enable memory hotplug without SRAT table at all
     *
     *   Linux: if QEMU is started with initial memory all below 4Gb
     *   and no SRAT table present, guest kernel will use nommu DMA ops,
     *   which breaks 32bit hw drivers when memory is hotplugged and
     *   guest tries to use it with that drivers.
     *
     * Enable NUMA implicitly by adding a new NUMA node automatically.
     *
     * Or if MachineClass::auto_enable_numa is true and no NUMA nodes,
     * assume there is just one node with whole RAM.
     */
    if (ms->numa_state->num_nodes == 0 &&
        ((ms->ram_slots && mc->auto_enable_numa_with_memhp) ||
         (ms->maxram_size > ms->ram_size && mc->auto_enable_numa_with_memdev) ||
          mc->auto_enable_numa)) {
            NumaNodeOptions node = { };
            parse_numa_node(ms, &node, &error_abort);
            numa_info[0].node_mem = ms->ram_size;
    }

    assert(max_numa_nodeid <= MAX_NODES);

    /* No support for sparse NUMA node IDs yet: */
    for (i = max_numa_nodeid - 1; i >= 0; i--) {
        /* Report large node IDs first, to make mistakes easier to spot */
        if (!numa_info[i].present) {
            error_report("numa: Node ID missing: %d", i);
            exit(1);
        }
    }

    /* This must be always true if all nodes are present: */
    assert(ms->numa_state->num_nodes == max_numa_nodeid);

    if (ms->numa_state->num_nodes > 0) {
        uint64_t numa_total;

        numa_total = 0;
        for (i = 0; i < ms->numa_state->num_nodes; i++) {
            numa_total += numa_info[i].node_mem;
        }
        if (numa_total != ms->ram_size) {
            error_report("total memory for NUMA nodes (0x%" PRIx64 ")"
                         " should equal RAM size (0x" RAM_ADDR_FMT ")",
                         numa_total, ms->ram_size);
            exit(1);
        }

        if (!numa_uses_legacy_mem() && mc->default_ram_id) {
            if (ms->memdev) {
                error_report("'-machine memory-backend' and '-numa memdev'"
                             " properties are mutually exclusive");
                exit(1);
            }
            ms->ram = g_new(MemoryRegion, 1);
            memory_region_init(ms->ram, OBJECT(ms), mc->default_ram_id,
                               ms->ram_size);
            numa_init_memdev_container(ms, ms->ram);
        }
        /*
         * QEMU needs at least all unique node pair distances to build
         * the whole NUMA distance table. QEMU treats the distance table
         * as symmetric by default, i.e. distance A->B == distance B->A.
         * Thus, QEMU is able to complete the distance table
         * initialization even though only distance A->B is provided and
         * distance B->A is not. QEMU knows the distance of a node to
         * itself is always 10, so A->A distances may be omitted. When
         * the distances of two nodes of a pair differ, i.e. distance
         * A->B != distance B->A, then that means the distance table is
         * asymmetric. In this case, the distances for both directions
         * of all node pairs are required.
         */
        if (ms->numa_state->have_numa_distance) {
            /* Validate enough NUMA distance information was provided. */
            validate_numa_distance(ms);

            /* Validation succeeded, now fill in any missing distances. */
            complete_init_numa_distance(ms);
        }
    }
}
void parse_numa_opts(MachineState *ms)
{
    qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, &error_fatal);
}
void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp)
{
    int node_id = object_property_get_int(OBJECT(dev), "node-id", &error_abort);

    if (node_id == CPU_UNSET_NUMA_NODE_ID) {
        /* due to bug in libvirt, it doesn't pass node-id from props on
         * device_add as expected, so we have to fix it up here */
        if (slot->props.has_node_id) {
            object_property_set_int(OBJECT(dev), "node-id",
                                    slot->props.node_id, errp);
        }
    } else if (node_id != slot->props.node_id) {
        error_setg(errp, "invalid node-id, must be %"PRId64,
                   slot->props.node_id);
    }
}
static void numa_stat_memory_devices(NumaNodeMem node_mem[])
{
    MemoryDeviceInfoList *info_list = qmp_memory_device_list();
    MemoryDeviceInfoList *info;
    PCDIMMDeviceInfo *pcdimm_info;
    VirtioPMEMDeviceInfo *vpi;
    VirtioMEMDeviceInfo *vmi;
    SgxEPCDeviceInfo *se;

    for (info = info_list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value) {
            switch (value->type) {
            case MEMORY_DEVICE_INFO_KIND_DIMM:
            case MEMORY_DEVICE_INFO_KIND_NVDIMM:
                pcdimm_info = value->type == MEMORY_DEVICE_INFO_KIND_DIMM ?
                              value->u.dimm.data : value->u.nvdimm.data;
                node_mem[pcdimm_info->node].node_mem += pcdimm_info->size;
                node_mem[pcdimm_info->node].node_plugged_mem +=
                    pcdimm_info->size;
                break;
            case MEMORY_DEVICE_INFO_KIND_VIRTIO_PMEM:
                vpi = value->u.virtio_pmem.data;
                /* TODO: once we support numa, assign to right node */
                node_mem[0].node_mem += vpi->size;
                node_mem[0].node_plugged_mem += vpi->size;
                break;
            case MEMORY_DEVICE_INFO_KIND_VIRTIO_MEM:
                vmi = value->u.virtio_mem.data;
                node_mem[vmi->node].node_mem += vmi->size;
                node_mem[vmi->node].node_plugged_mem += vmi->size;
                break;
            case MEMORY_DEVICE_INFO_KIND_SGX_EPC:
                se = value->u.sgx_epc.data;
                node_mem[se->node].node_mem += se->size;
                node_mem[se->node].node_plugged_mem = 0;
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    qapi_free_MemoryDeviceInfoList(info_list);
}
void query_numa_node_mem(NumaNodeMem node_mem[], MachineState *ms)
{
    int i;

    if (ms->numa_state == NULL || ms->numa_state->num_nodes <= 0) {
        return;
    }

    numa_stat_memory_devices(node_mem);
    for (i = 0; i < ms->numa_state->num_nodes; i++) {
        node_mem[i].node_mem += ms->numa_state->nodes[i].node_mem;
    }
}
static int ram_block_notify_add_single(RAMBlock *rb, void *opaque)
{
    const ram_addr_t max_size = qemu_ram_get_max_length(rb);
    const ram_addr_t size = qemu_ram_get_used_length(rb);
    void *host = qemu_ram_get_host_addr(rb);
    RAMBlockNotifier *notifier = opaque;

    if (host) {
        notifier->ram_block_added(notifier, host, size, max_size);
    }
    return 0;
}

static int ram_block_notify_remove_single(RAMBlock *rb, void *opaque)
{
    const ram_addr_t max_size = qemu_ram_get_max_length(rb);
    const ram_addr_t size = qemu_ram_get_used_length(rb);
    void *host = qemu_ram_get_host_addr(rb);
    RAMBlockNotifier *notifier = opaque;

    if (host) {
        notifier->ram_block_removed(notifier, host, size, max_size);
    }
    return 0;
}

void ram_block_notifier_add(RAMBlockNotifier *n)
{
    QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next);

    /* Notify about all existing ram blocks. */
    if (n->ram_block_added) {
        qemu_ram_foreach_block(ram_block_notify_add_single, n);
    }
}

void ram_block_notifier_remove(RAMBlockNotifier *n)
{
    QLIST_REMOVE(n, next);

    if (n->ram_block_removed) {
        qemu_ram_foreach_block(ram_block_notify_remove_single, n);
    }
}

void ram_block_notify_add(void *host, size_t size, size_t max_size)
{
    RAMBlockNotifier *notifier;
    RAMBlockNotifier *next;

    QLIST_FOREACH_SAFE(notifier, &ram_list.ramblock_notifiers, next, next) {
        if (notifier->ram_block_added) {
            notifier->ram_block_added(notifier, host, size, max_size);
        }
    }
}

void ram_block_notify_remove(void *host, size_t size, size_t max_size)
{
    RAMBlockNotifier *notifier;
    RAMBlockNotifier *next;

    QLIST_FOREACH_SAFE(notifier, &ram_list.ramblock_notifiers, next, next) {
        if (notifier->ram_block_removed) {
            notifier->ram_block_removed(notifier, host, size, max_size);
        }
    }
}

void ram_block_notify_resize(void *host, size_t old_size, size_t new_size)
{
    RAMBlockNotifier *notifier;
    RAMBlockNotifier *next;

    QLIST_FOREACH_SAFE(notifier, &ram_list.ramblock_notifiers, next, next) {
        if (notifier->ram_block_resized) {
            notifier->ram_block_resized(notifier, host, old_size, new_size);
        }
    }
}
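
/*
 * Usage sketch (illustrative; the names my_added and my_notifier are
 * hypothetical): a subsystem tracking guest RAM fills in the callbacks it
 * cares about and registers the notifier; ram_block_notifier_add()
 * replays ram_block_added for all pre-existing blocks:
 *
 *     static void my_added(RAMBlockNotifier *n, void *host,
 *                          size_t size, size_t max_size)
 *     {
 *         // e.g. register [host, host + max_size) with a kernel driver
 *     }
 *
 *     static RAMBlockNotifier my_notifier = {
 *         .ram_block_added = my_added,
 *     };
 *
 *     ram_block_notifier_add(&my_notifier);
 */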