/*
 * QEMU RISC-V NUMA Helper
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/riscv/numa.h"
#include "sysemu/device_tree.h"
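
/* Return true if the machine has NUMA state with at least one node. */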
static bool numa_enabled(const MachineState *ms)
{
    return (ms->numa_state && ms->numa_state->num_nodes) ? true : false;
}
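
/* Number of sockets: one per NUMA node, or a single socket without NUMA. */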
int riscv_socket_count(const MachineState *ms)
{
    return (numa_enabled(ms)) ? ms->numa_state->num_nodes : 1;
}
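
/*
 * Find the lowest hart id assigned to the given socket, or -1 if the
 * socket has no harts.
 */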
int riscv_socket_first_hartid(const MachineState *ms, int socket_id)
{
    int i, first_hartid = ms->smp.cpus;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? 0 : -1;
    }

    for (i = 0; i < ms->smp.cpus; i++) {
        if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
            continue;
        }
        if (i < first_hartid) {
            first_hartid = i;
        }
    }

    return (first_hartid < ms->smp.cpus) ? first_hartid : -1;
}
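
/*
 * Find the highest hart id assigned to the given socket, or -1 if the
 * socket has no harts.
 */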
int riscv_socket_last_hartid(const MachineState *ms, int socket_id)
{
    int i, last_hartid = -1;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? (ms->smp.cpus - 1) : -1;
    }

    for (i = 0; i < ms->smp.cpus; i++) {
        if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
            continue;
        }
        if (i > last_hartid) {
            last_hartid = i;
        }
    }

    return (last_hartid < ms->smp.cpus) ? last_hartid : -1;
}
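
/* Number of harts in the given socket, or -1 if the socket is invalid. */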
int riscv_socket_hart_count(const MachineState *ms, int socket_id)
{
    int first_hartid, last_hartid;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? ms->smp.cpus : -1;
    }

    first_hartid = riscv_socket_first_hartid(ms, socket_id);
    if (first_hartid < 0) {
        return -1;
    }

    last_hartid = riscv_socket_last_hartid(ms, socket_id);
    if (last_hartid < 0) {
        return -1;
    }

    if (first_hartid > last_hartid) {
        return -1;
    }

    return last_hartid - first_hartid + 1;
}
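
/*
 * Check that the harts of the given socket form a contiguous range of
 * hart ids, all mapped to the same NUMA node.
 */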
bool riscv_socket_check_hartids(const MachineState *ms, int socket_id)
{
    int i, first_hartid, last_hartid;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? true : false;
    }

    first_hartid = riscv_socket_first_hartid(ms, socket_id);
    if (first_hartid < 0) {
        return false;
    }

    last_hartid = riscv_socket_last_hartid(ms, socket_id);
    if (last_hartid < 0) {
        return false;
    }

    for (i = first_hartid; i <= last_hartid; i++) {
        if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
            return false;
        }
    }

    return true;
}
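
/*
 * Offset of the given socket's memory within guest RAM, i.e. the sum of
 * the memory sizes of all lower-numbered NUMA nodes.
 */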
uint64_t riscv_socket_mem_offset(const MachineState *ms, int socket_id)
{
    int i;
    uint64_t mem_offset = 0;

    if (!numa_enabled(ms)) {
        return 0;
    }

    for (i = 0; i < ms->numa_state->num_nodes; i++) {
        if (i == socket_id) {
            break;
        }
        mem_offset += ms->numa_state->nodes[i].node_mem;
    }

    return (i == socket_id) ? mem_offset : 0;
}
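
/* Memory size of the given socket (all of RAM when NUMA is disabled). */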
uint64_t riscv_socket_mem_size(const MachineState *ms, int socket_id)
{
    if (!numa_enabled(ms)) {
        return (!socket_id) ? ms->ram_size : 0;
    }

    return (socket_id < ms->numa_state->num_nodes) ?
            ms->numa_state->nodes[socket_id].node_mem : 0;
}
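
/* Add a "numa-node-id" property to the given FDT node when NUMA is enabled. */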
void riscv_socket_fdt_write_id(const MachineState *ms, void *fdt,
                               const char *node_name, int socket_id)
{
    if (numa_enabled(ms)) {
        qemu_fdt_setprop_cell(fdt, node_name, "numa-node-id", socket_id);
    }
}
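
/*
 * Emit the NUMA distance matrix as a "/distance-map" FDT node, encoded as
 * (node A, node B, distance) triples of big-endian cells.
 */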
void riscv_socket_fdt_write_distance_matrix(const MachineState *ms, void *fdt)
{
    int i, j, idx;
    uint32_t *dist_matrix, dist_matrix_size;

    if (numa_enabled(ms) && ms->numa_state->have_numa_distance) {
        dist_matrix_size = riscv_socket_count(ms) * riscv_socket_count(ms);
        dist_matrix_size *= (3 * sizeof(uint32_t));
        dist_matrix = g_malloc0(dist_matrix_size);

        for (i = 0; i < riscv_socket_count(ms); i++) {
            for (j = 0; j < riscv_socket_count(ms); j++) {
                idx = (i * riscv_socket_count(ms) + j) * 3;
                dist_matrix[idx + 0] = cpu_to_be32(i);
                dist_matrix[idx + 1] = cpu_to_be32(j);
                dist_matrix[idx + 2] =
                    cpu_to_be32(ms->numa_state->nodes[i].distance[j]);
            }
        }

        qemu_fdt_add_subnode(fdt, "/distance-map");
        qemu_fdt_setprop_string(fdt, "/distance-map", "compatible",
                                "numa-distance-map-v1");
        qemu_fdt_setprop(fdt, "/distance-map", "distance-matrix",
                         dist_matrix, dist_matrix_size);
        g_free(dist_matrix);
    }
}
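
/* Map a CPU index to its CpuInstanceProperties (topology/NUMA placement). */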
CpuInstanceProperties
riscv_numa_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);

    assert(cpu_index < possible_cpus->len);
    return possible_cpus->cpus[cpu_index].props;
}
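
/*
 * Default NUMA node for a CPU index: split the harts evenly across the
 * configured nodes, clamping to the last node.
 */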
int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    int64_t nidx = 0;

    if (ms->numa_state->num_nodes) {
        nidx = idx / (ms->smp.cpus / ms->numa_state->num_nodes);
        if (ms->numa_state->num_nodes <= nidx) {
            nidx = ms->numa_state->num_nodes - 1;
        }
    }

    return nidx;
}
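
/*
 * Build (or return the cached) list of possible CPUs for this machine,
 * one entry per hart up to max_cpus.
 */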
const CPUArchIdList *riscv_numa_possible_cpu_arch_ids(MachineState *ms)
{
    int n;
    unsigned int max_cpus = ms->smp.max_cpus;

    if (ms->possible_cpus) {
        assert(ms->possible_cpus->len == max_cpus);
        return ms->possible_cpus;
    }

    ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                                  sizeof(CPUArchId) * max_cpus);
    ms->possible_cpus->len = max_cpus;
    for (n = 0; n < ms->possible_cpus->len; n++) {
        ms->possible_cpus->cpus[n].type = ms->cpu_type;
        ms->possible_cpus->cpus[n].arch_id = n;
        ms->possible_cpus->cpus[n].props.has_core_id = true;
        ms->possible_cpus->cpus[n].props.core_id = n;
    }

    return ms->possible_cpus;
}