9 static struct thread
*thread__new(pid_t pid
, int set_comm
)
11 struct thread
*self
= calloc(1, sizeof(*self
));
16 self
->comm
= malloc(32);
18 snprintf(self
->comm
, 32, ":%d", self
->pid
);
21 INIT_LIST_HEAD(&self
->removed_maps
);
27 int thread__set_comm(struct thread
*self
, const char *comm
)
31 self
->comm
= strdup(comm
);
32 return self
->comm
? 0 : -ENOMEM
;
35 static size_t thread__fprintf(struct thread
*self
, FILE *fp
)
39 size_t ret
= fprintf(fp
, "Thread %d %s\nCurrent maps:\n",
40 self
->pid
, self
->comm
);
42 for (nd
= rb_first(&self
->maps
); nd
; nd
= rb_next(nd
)) {
43 pos
= rb_entry(nd
, struct map
, rb_node
);
44 ret
+= map__fprintf(pos
, fp
);
47 ret
= fprintf(fp
, "Removed maps:\n");
49 list_for_each_entry(pos
, &self
->removed_maps
, node
)
50 ret
+= map__fprintf(pos
, fp
);
55 static struct thread
*
56 __threads__findnew(pid_t pid
, struct rb_root
*threads
,
57 struct thread
**last_match
,
60 struct rb_node
**p
= &threads
->rb_node
;
61 struct rb_node
*parent
= NULL
;
65 * Font-end cache - PID lookups come in blocks,
66 * so most of the time we dont have to look up
69 if (*last_match
&& (*last_match
)->pid
== pid
)
74 th
= rb_entry(parent
, struct thread
, rb_node
);
87 th
= thread__new(pid
, set_comm
);
90 rb_link_node(&th
->rb_node
, parent
, p
);
91 rb_insert_color(&th
->rb_node
, threads
);
99 threads__findnew(pid_t pid
, struct rb_root
*threads
, struct thread
**last_match
)
101 return __threads__findnew(pid
, threads
, last_match
, 1);
105 threads__findnew_nocomm(pid_t pid
, struct rb_root
*threads
,
106 struct thread
**last_match
)
108 return __threads__findnew(pid
, threads
, last_match
, 0);
/*
 * Create (or find) pid 0 and name it "swapper", the conventional idle
 * task.  Returns the thread, or NULL (with a message on stderr) if
 * lookup/creation or the comm allocation fails.
 */
struct thread *
register_idle_thread(struct rb_root *threads, struct thread **last_match)
{
	struct thread *thread = threads__findnew(0, threads, last_match);

	if (!thread || thread__set_comm(thread, "swapper")) {
		fprintf(stderr, "problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
124 static void thread__remove_overlappings(struct thread
*self
, struct map
*map
)
126 struct rb_node
*next
= rb_first(&self
->maps
);
129 struct map
*pos
= rb_entry(next
, struct map
, rb_node
);
130 next
= rb_next(&pos
->rb_node
);
132 if (!map__overlap(pos
, map
))
136 printf("overlapping maps:\n");
137 map__fprintf(map
, stdout
);
138 map__fprintf(pos
, stdout
);
141 rb_erase(&pos
->rb_node
, &self
->maps
);
143 * We may have references to this map, for instance in some
144 * hist_entry instances, so just move them to a separate
147 list_add_tail(&pos
->node
, &self
->removed_maps
);
151 void maps__insert(struct rb_root
*maps
, struct map
*map
)
153 struct rb_node
**p
= &maps
->rb_node
;
154 struct rb_node
*parent
= NULL
;
155 const u64 ip
= map
->start
;
160 m
= rb_entry(parent
, struct map
, rb_node
);
167 rb_link_node(&map
->rb_node
, parent
, p
);
168 rb_insert_color(&map
->rb_node
, maps
);
171 struct map
*maps__find(struct rb_root
*maps
, u64 ip
)
173 struct rb_node
**p
= &maps
->rb_node
;
174 struct rb_node
*parent
= NULL
;
179 m
= rb_entry(parent
, struct map
, rb_node
);
182 else if (ip
> m
->end
)
191 void thread__insert_map(struct thread
*self
, struct map
*map
)
193 thread__remove_overlappings(self
, map
);
194 maps__insert(&self
->maps
, map
);
197 int thread__fork(struct thread
*self
, struct thread
*parent
)
203 self
->comm
= strdup(parent
->comm
);
207 for (nd
= rb_first(&parent
->maps
); nd
; nd
= rb_next(nd
)) {
208 struct map
*map
= rb_entry(nd
, struct map
, rb_node
);
209 struct map
*new = map__clone(map
);
212 thread__insert_map(self
, new);
218 size_t threads__fprintf(FILE *fp
, struct rb_root
*threads
)
223 for (nd
= rb_first(threads
); nd
; nd
= rb_next(nd
)) {
224 struct thread
*pos
= rb_entry(nd
, struct thread
, rb_node
);
226 ret
+= thread__fprintf(pos
, fp
);