util/selfmap.c
/*
 * Utility function to get QEMU's own process map
 *
 * Copyright (c) 2020 Linaro Ltd
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/selfmap.h"

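/*
 * read_self_maps() parses /proc/self/maps, where each line has the form
 * (the trailing pathname is absent for anonymous mappings):
 *
 *     start-end perms offset dev_major:dev_minor inode [pathname]
 *
 * Addresses, the offset and the device numbers are hexadecimal;
 * the inode is decimal.
 */
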
IntervalTreeRoot *read_self_maps(void)
{
    IntervalTreeRoot *root;
    gchar *maps, **lines;
    guint i, nlines;

    if (!g_file_get_contents("/proc/self/maps", &maps, NULL, NULL)) {
        return NULL;
    }

    root = g_new0(IntervalTreeRoot, 1);
    lines = g_strsplit(maps, "\n", 0);
    nlines = g_strv_length(lines);

    for (i = 0; i < nlines; i++) {
        gchar **fields = g_strsplit(lines[i], " ", 6);
        guint nfields = g_strv_length(fields);

        if (nfields > 4) {
            uint64_t start, end, offset, inode;
            unsigned dev_maj, dev_min;
            int errors = 0;
            const char *p;

            errors |= qemu_strtou64(fields[0], &p, 16, &start);
            errors |= qemu_strtou64(p + 1, NULL, 16, &end);
            errors |= qemu_strtou64(fields[2], NULL, 16, &offset);
            errors |= qemu_strtoui(fields[3], &p, 16, &dev_maj);
            errors |= qemu_strtoui(p + 1, NULL, 16, &dev_min);
            errors |= qemu_strtou64(fields[4], NULL, 10, &inode);

            if (!errors) {
                size_t path_len;
                MapInfo *e;

                if (nfields == 6) {
                    p = fields[5];
                    p += strspn(p, " ");
                    path_len = strlen(p) + 1;
                } else {
                    p = NULL;
                    path_len = 0;
                }

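                /*
                 * Store the MapInfo and its copy of the pathname in one
                 * chunk, so free_self_maps() can release each node with
                 * a single g_free().
                 */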
                e = g_malloc0(sizeof(*e) + path_len);

                e->itree.start = start;
                e->itree.last = end - 1;
                e->offset = offset;
                e->dev = makedev(dev_maj, dev_min);
                e->inode = inode;

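                /*
                 * The perms field reads e.g. "r-xp": 'p' marks a private
                 * (copy-on-write) mapping, 's' a shared one.
                 */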
                e->is_read = fields[1][0] == 'r';
                e->is_write = fields[1][1] == 'w';
                e->is_exec = fields[1][2] == 'x';
                e->is_priv = fields[1][3] == 'p';

                if (path_len) {
                    e->path = memcpy(e + 1, p, path_len);
                }

                interval_tree_insert(&e->itree, root);
            }
        }
        g_strfreev(fields);
    }
    g_strfreev(lines);
    g_free(maps);

    return root;
}

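/*
 * Illustrative sketch, not part of this file: a caller can look up the
 * mapping that covers a given address by querying the returned tree.
 * This assumes interval_tree_iter_first() from "qemu/interval-tree.h",
 * container_of() from the QEMU headers, and a caller-provided addr.
 *
 *     IntervalTreeRoot *map = read_self_maps();
 *     IntervalTreeNode *n = interval_tree_iter_first(map, addr, addr);
 *
 *     if (n) {
 *         MapInfo *e = container_of(n, MapInfo, itree);
 *         printf("%c%c%c%c %s\n",
 *                e->is_read ? 'r' : '-', e->is_write ? 'w' : '-',
 *                e->is_exec ? 'x' : '-', e->is_priv ? 'p' : 's',
 *                e->path ? e->path : "[anonymous]");
 *     }
 *     free_self_maps(map);
 */
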
/**
 * free_self_maps:
 * @root: an interval tree
 *
 * Free a tree of MapInfo structures.
 * Since we allocated each MapInfo in one chunk, we need not consider the
 * contents and can simply free each RBNode.
 */

static void free_rbnode(RBNode *n)
{
    if (n) {
        free_rbnode(n->rb_left);
        free_rbnode(n->rb_right);
        g_free(n);
    }
}

void free_self_maps(IntervalTreeRoot *root)
{
    if (root) {
        free_rbnode(root->rb_root.rb_node);
        g_free(root);
    }
}