Merge tag 'v9.1.0'
[qemu/ar7.git] / target / i386 / kvm / vmsr_energy.c
blob 7e064c5aef30273daf6b9e9e5fbdcd9ef7a58392
/*
 * QEMU KVM support -- x86 virtual RAPL msr
 *
 * Copyright 2024 Red Hat, Inc.
 *
 * Author:
 *      Anthony Harivel <aharivel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "vmsr_energy.h"
#include "io/channel.h"
#include "io/channel-socket.h"
#include "hw/boards.h"
#include "cpu.h"
#include "host-cpu.h"

char *vmsr_compute_default_paths(void)
{
    g_autofree char *state = qemu_get_local_state_dir();

    return g_build_filename(state, "run", "qemu-vmsr-helper.sock", NULL);
}

bool is_host_cpu_intel(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* strcmp() returns 0 on a match, so test for equality explicitly */
    return g_str_equal(vendor, CPUID_VENDOR_INTEL);
}

int is_rapl_enabled(void)
{
    const char *path = "/sys/class/powercap/intel-rapl/enabled";
    FILE *file = fopen(path, "r");
    int value = 0;

    if (file != NULL) {
        if (fscanf(file, "%d", &value) != 1) {
            error_report("INTEL RAPL not enabled");
        }
        fclose(file);
    } else {
        error_report("Error opening %s", path);
    }

    return value;
}

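/*
 * Usage sketch (illustrative only, compiled out): the two checks above are
 * meant to gate the RAPL MSR emulation path together; the host must both be
 * an Intel part and have RAPL enabled in powercap.
 */
#if 0
static bool vmsr_example_host_supported(void)
{
    return is_host_cpu_intel() && is_rapl_enabled() == 1;
}
#endif
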
QIOChannelSocket *vmsr_open_socket(const char *path)
{
    g_autofree char *socket_path = NULL;

    socket_path = g_strdup(path);

    SocketAddress saddr = {
        .type = SOCKET_ADDRESS_TYPE_UNIX,
        .u.q_unix.path = socket_path
    };

    QIOChannelSocket *sioc = qio_channel_socket_new();
    Error *local_err = NULL;

    qio_channel_set_name(QIO_CHANNEL(sioc), "vmsr-helper");
    qio_channel_socket_connect_sync(sioc,
                                    &saddr,
                                    &local_err);
    if (local_err) {
        /* Close socket. */
        qio_channel_close(QIO_CHANNEL(sioc), NULL);
        object_unref(OBJECT(sioc));
        sioc = NULL;
        goto out;
    }

    qio_channel_set_delay(QIO_CHANNEL(sioc), false);
out:
    return sioc;
}

uint64_t vmsr_read_msr(uint32_t reg, uint32_t cpu_id, uint32_t tid,
                       QIOChannelSocket *sioc)
{
    uint64_t data = 0;
    int r = 0;
    Error *local_err = NULL;
    uint32_t buffer[3];

    /*
     * Send the required arguments:
     * 1. RAPL MSR register to read
     * 2. On which CPU ID
     * 3. From which vCPU (Thread ID)
     */
    buffer[0] = reg;
    buffer[1] = cpu_id;
    buffer[2] = tid;

    r = qio_channel_write_all(QIO_CHANNEL(sioc),
                              (char *)buffer, sizeof(buffer),
                              &local_err);
    if (r < 0) {
        goto out_close;
    }

    r = qio_channel_read(QIO_CHANNEL(sioc),
                         (char *)&data, sizeof(data),
                         &local_err);
    if (r < 0) {
        data = 0;
        goto out_close;
    }

out_close:
    return data;
}

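/*
 * Usage sketch (illustrative only, compiled out): how a caller might chain
 * the helpers above to query a RAPL MSR through the privileged vmsr helper
 * socket. The register 0x611 (MSR_PKG_ENERGY_STATUS), CPU id 0 and thread
 * id 0 are placeholder values for the example.
 */
#if 0
static uint64_t vmsr_example_read_pkg_energy(void)
{
    g_autofree char *sock_path = vmsr_compute_default_paths();
    QIOChannelSocket *sioc = vmsr_open_socket(sock_path);
    uint64_t energy = 0;

    if (sioc != NULL) {
        energy = vmsr_read_msr(0x611, 0, 0, sioc);
        qio_channel_close(QIO_CHANNEL(sioc), NULL);
        object_unref(OBJECT(sioc));
    }
    return energy;
}
#endif
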
/* Retrieve the max number of physical packages */
unsigned int vmsr_get_max_physical_package(unsigned int max_cpus)
{
    const char *dir = "/sys/devices/system/cpu/";
    const char *topo_path = "topology/physical_package_id";
    g_autofree int *uniquePackages = g_new0(int, max_cpus);
    unsigned int packageCount = 0;
    FILE *file = NULL;

    for (int i = 0; i < max_cpus; i++) {
        g_autofree char *filePath = NULL;
        g_autofree char *cpuid = g_strdup_printf("cpu%d", i);

        filePath = g_build_filename(dir, cpuid, topo_path, NULL);

        file = fopen(filePath, "r");

        if (file == NULL) {
            error_report("Error opening physical_package_id file");
            return 0;
        }

        char packageId[10];
        if (fgets(packageId, sizeof(packageId), file) == NULL) {
            packageCount = 0;
        }
        fclose(file);

        int currentPackageId = atoi(packageId);

        bool isUnique = true;
        for (int j = 0; j < packageCount; j++) {
            if (uniquePackages[j] == currentPackageId) {
                isUnique = false;
                break;
            }
        }

        if (isUnique) {
            uniquePackages[packageCount] = currentPackageId;
            packageCount++;

            if (packageCount >= max_cpus) {
                break;
            }
        }
    }

    return (packageCount == 0) ? 1 : packageCount;
}

/* Retrieve the number of physical CPUs on the host */
unsigned int vmsr_get_maxcpus(void)
{
    GDir *dir;
    const gchar *entry_name;
    unsigned int cpu_count = 0;
    const char *path = "/sys/devices/system/cpu/";

    dir = g_dir_open(path, 0, NULL);
    if (dir == NULL) {
        error_report("Unable to open cpu directory");
        return -1;
    }

    while ((entry_name = g_dir_read_name(dir)) != NULL) {
        if (g_ascii_strncasecmp(entry_name, "cpu", 3) == 0 &&
            isdigit(entry_name[3])) {
            cpu_count++;
        }
    }

    g_dir_close(dir);

    return cpu_count;
}

/* Count the number of physical CPUs in each package */
unsigned int vmsr_count_cpus_per_package(unsigned int *package_count,
                                         unsigned int max_pkgs)
{
    gsize length;

    /* Iterate over cpus and count cpus in each package */
    for (int cpu_id = 0; ; cpu_id++) {
        /* Declared in the loop so g_autofree releases each iteration */
        g_autofree char *file_contents = NULL;
        g_autofree char *path = NULL;
        g_autofree char *path_name = NULL;

        path_name = g_strdup_printf("/sys/devices/system/cpu/cpu%d/"
                                    "topology/physical_package_id", cpu_id);

        path = g_build_filename(path_name, NULL);

        if (!g_file_get_contents(path, &file_contents, &length, NULL)) {
            break; /* No more cpus */
        }

        /* Get the physical package ID for this CPU */
        int package_id = atoi(file_contents);

        /* Check if the package ID is within the known number of packages */
        if (package_id >= 0 && package_id < max_pkgs) {
            /* If yes, count the cpu for this package */
            package_count[package_id]++;
        }
    }

    return 0;
}

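/*
 * Usage sketch (illustrative only, compiled out): the topology helpers above
 * are typically chained to size per-package accounting arrays; the names
 * below are placeholders for the example.
 */
#if 0
static void vmsr_example_topology(void)
{
    unsigned int host_cpus = vmsr_get_maxcpus();
    unsigned int max_pkgs = vmsr_get_max_physical_package(host_cpus);
    g_autofree unsigned int *cpus_per_pkg = g_new0(unsigned int, max_pkgs);

    vmsr_count_cpus_per_package(cpus_per_pkg, max_pkgs);

    for (unsigned int i = 0; i < max_pkgs; i++) {
        printf("package %u: %u cpus\n", i, cpus_per_pkg[i]);
    }
}
#endif
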
/* Get the physical package id from a given cpu id */
int vmsr_get_physical_package_id(int cpu_id)
{
    g_autofree char *file_contents = NULL;
    g_autofree char *file_path = NULL;
    int package_id = -1;
    gsize length;

    file_path = g_strdup_printf("/sys/devices/system/cpu/cpu%d"
                                "/topology/physical_package_id", cpu_id);

    if (!g_file_get_contents(file_path, &file_contents, &length, NULL)) {
        goto out;
    }

    package_id = atoi(file_contents);

out:
    return package_id;
}

/* Read the scheduled time for a given thread of a given pid */
void vmsr_read_thread_stat(pid_t pid,
                           unsigned int thread_id,
                           unsigned long long *utime,
                           unsigned long long *stime,
                           unsigned int *cpu_id)
{
    g_autofree char *path = NULL;
    g_autofree char *path_name = NULL;

    path_name = g_strdup_printf("/proc/%d/task/%u/stat", pid, thread_id);

    path = g_build_filename(path_name, NULL);

    FILE *file = fopen(path, "r");
    if (file == NULL) {
        error_report("Error opening %s", path_name);
        return;
    }

    if (fscanf(file, "%*d (%*[^)]) %*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u"
               " %llu %llu %*d %*d %*d %*d %*d %*d %*u %*u %*d %*u %*u"
               " %*u %*u %*u %*u %*u %*u %*u %*u %*u %*d %*u %*u %u",
               utime, stime, cpu_id) != 3) {
        fclose(file);
        error_report("Error fscanf did not report the right amount of items");
        return;
    }

    fclose(file);
}

/* Read the QEMU task folder to retrieve all QEMU thread IDs */
pid_t *vmsr_get_thread_ids(pid_t pid, unsigned int *num_threads)
{
    g_autofree char *task_path = g_strdup_printf("%d/task", pid);
    g_autofree char *path = g_build_filename("/proc", task_path, NULL);

    DIR *dir = opendir(path);
    if (dir == NULL) {
        error_report("Error opening %s", path);
        return NULL;
    }

    pid_t *thread_ids = NULL;
    unsigned int thread_count = 0;

    /* readdir() returns storage owned by the DIR handle: do not free it */
    struct dirent *ent;
    while ((ent = readdir(dir)) != NULL) {
        if (ent->d_name[0] == '.') {
            continue;
        }
        pid_t tid = atoi(ent->d_name);
        if (pid != tid) {
            thread_ids = g_renew(pid_t, thread_ids, (thread_count + 1));
            thread_ids[thread_count] = tid;
            thread_count++;
        }
    }

    closedir(dir);

    *num_threads = thread_count;
    return thread_ids;
}

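/*
 * Usage sketch (illustrative only, compiled out): sampling utime/stime and
 * the last-run CPU for every thread of the current process by combining the
 * two helpers above; getpid() comes from <unistd.h> via osdep.h.
 */
#if 0
static void vmsr_example_sample_threads(void)
{
    unsigned int num_threads = 0;
    g_autofree pid_t *tids = vmsr_get_thread_ids(getpid(), &num_threads);

    for (unsigned int i = 0; tids != NULL && i < num_threads; i++) {
        unsigned long long utime = 0, stime = 0;
        unsigned int cpu_id = 0;

        vmsr_read_thread_stat(getpid(), tids[i], &utime, &stime, &cpu_id);
        printf("tid %d: utime=%llu stime=%llu cpu=%u\n",
               tids[i], utime, stime, cpu_id);
    }
}
#endif
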
void vmsr_delta_ticks(vmsr_thread_stat *thd_stat, int i)
{
    thd_stat[i].delta_ticks = (thd_stat[i].utime[1] + thd_stat[i].stime[1])
                            - (thd_stat[i].utime[0] + thd_stat[i].stime[0]);
}

double vmsr_get_ratio(uint64_t e_delta,
                      unsigned long long delta_ticks,
                      unsigned int maxticks)
{
    return (e_delta / 100.0) * ((100.0 / maxticks) * delta_ticks);
}

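/*
 * Worked example with illustrative numbers: vmsr_get_ratio() reduces to
 * e_delta * delta_ticks / maxticks. With e_delta = 1000 energy units,
 * maxticks = 400 and delta_ticks = 100, the thread ran for 100/400 = 25%
 * of the sampling window and is attributed 1000 * 0.25 = 250 units of the
 * package energy delta.
 */
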
void vmsr_init_topo_info(X86CPUTopoInfo *topo_info,
                         const MachineState *ms)
{
    topo_info->dies_per_pkg = ms->smp.dies;
    topo_info->modules_per_die = ms->smp.modules;
    topo_info->cores_per_module = ms->smp.cores;
    topo_info->threads_per_core = ms->smp.threads;
}