1 #include <linux/kernel.h>
/*
 * Open self->filename read-only and validate it before use: the file
 * must exist, be owned by the current user or root (unless 'force' is
 * set), be non-empty, and carry a perf header this tool can read.
 *
 * NOTE(review): this excerpt is incomplete -- the error-return paths,
 * the branch framing around the pr_err() calls and the braces are
 * missing from the visible text; only the surviving tokens are kept.
 */
static int perf_session__open(struct perf_session *self, bool force)
        struct stat input_stat;

        self->fd = open(self->filename, O_RDONLY);
        /* open-failure diagnostics (guarding branch not visible here) */
        pr_err("failed to open file: %s", self->filename);
        /* hint for the common case: the default output name was never recorded */
        if (!strcmp(self->filename, "perf.data"))
                pr_err(" (try 'perf record' first)");

        if (fstat(self->fd, &input_stat) < 0)

        /* refuse to read other users' files unless explicitly forced */
        if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
                pr_err("file %s not owned by current user or root\n",

        if (!input_stat.st_size) {
                pr_info("zero-sized file (%s), nothing to do!\n",

        /* parse and sanity-check the perf file header */
        if (perf_header__read(&self->header, self->fd) < 0) {
                pr_err("incompatible file format");

        /* remember the total file size for the read loop's bounds check */
        self->size = input_stat.st_size;
/*
 * Allocate and initialise a perf_session.  The filename is copied into
 * a trailing buffer allocated together with the struct (zalloc of
 * sizeof(*self) + len).  In O_RDONLY mode the data file is opened and
 * its header read; kernel maps are created up front only for O_WRONLY.
 *
 * NOTE(review): incomplete excerpt -- NULL checks, goto labels and the
 * error-path framing (which ends in perf_session__delete()) are
 * missing from the visible text.
 */
struct perf_session *perf_session__new(const char *filename, int mode, bool force)
        size_t len = filename ? strlen(filename) + 1 : 0;
        /* one allocation covers the struct plus the trailing filename copy */
        struct perf_session *self = zalloc(sizeof(*self) + len);

        if (perf_header__init(&self->header) < 0)

        memcpy(self->filename, filename, len);
        self->threads = RB_ROOT;
        self->last_match = NULL;
        self->mmap_window = 32; /* pages mapped per window in process_events */
        self->unknown_events = 0;
        map_groups__init(&self->kmaps);

        if (mode == O_RDONLY) {
                if (perf_session__open(self, force) < 0)
        } else if (mode == O_WRONLY) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(self) < 0)

        /* cache the sample type so readers need not re-parse the header */
        self->sample_type = perf_header__sample_type(&self->header);

        /* error path: undo everything done above */
        perf_session__delete(self);
/*
 * Tear down a session created by perf_session__new().
 *
 * NOTE(review): incomplete excerpt -- the remaining cleanup after
 * perf_header__exit() (presumably including freeing 'self') is not
 * visible here.
 */
void perf_session__delete(struct perf_session *self)
        perf_header__exit(&self->header);
/*
 * Does this symbol's name match the file-scope parent_regex?
 * Used by perf_session__resolve_callchain() to pick out the parent
 * symbol of a callchain entry.
 *
 * NOTE(review): the return statements are missing from this excerpt.
 */
static bool symbol__match_parent_regex(struct symbol *sym)
        if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
/*
 * Resolve each IP in a sample's callchain to a symbol.  Entries at or
 * above PERF_CONTEXT_MAX are context markers that switch the cpumode
 * used for subsequent lookups; when sort__has_parent is set, the first
 * symbol matching the parent regex is stored through *parent.  When
 * callchain use is enabled, 'syms' is calloc'ed (chain->nr slots) --
 * presumably owned/freed by the caller; verify at call sites.
 *
 * NOTE(review): incomplete excerpt -- the switch framing, the 'i'
 * declaration, NULL-check/continue logic and the returns are missing.
 */
struct symbol **perf_session__resolve_callchain(struct perf_session *self,
                                                struct thread *thread,
                                                struct ip_callchain *chain,
                                                struct symbol **parent)
        u8 cpumode = PERF_RECORD_MISC_USER; /* default until a marker says otherwise */
        struct symbol **syms = NULL;

        if (symbol_conf.use_callchain) {
                syms = calloc(chain->nr, sizeof(*syms));
                        fprintf(stderr, "Can't allocate memory for symbols\n");

        for (i = 0; i < chain->nr; i++) {
                u64 ip = chain->ips[i];
                struct addr_location al;

                /* context markers are not real IPs; they switch cpumode */
                if (ip >= PERF_CONTEXT_MAX) {
                        case PERF_CONTEXT_HV:
                                cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
                        case PERF_CONTEXT_KERNEL:
                                cpumode = PERF_RECORD_MISC_KERNEL; break;
                        case PERF_CONTEXT_USER:
                                cpumode = PERF_RECORD_MISC_USER; break;

                thread__find_addr_location(thread, self, cpumode,
                                           MAP__FUNCTION, ip, &al, NULL);
                if (al.sym != NULL) {
                        /* remember the first symbol matching the parent regex */
                        if (sort__has_parent && !*parent &&
                            symbol__match_parent_regex(al.sym))

                if (!symbol_conf.use_callchain)
/*
 * No-op handler installed by perf_event_ops__fill_defaults() for any
 * callback the tool did not provide; just logs "unhandled".
 *
 * NOTE(review): the return statement is missing from this excerpt.
 */
static int process_event_stub(event_t *event __used,
                              struct perf_session *session __used)
        dump_printf(": unhandled!\n");
169 static void perf_event_ops__fill_defaults(struct perf_event_ops
*handler
)
171 if (handler
->sample
== NULL
)
172 handler
->sample
= process_event_stub
;
173 if (handler
->mmap
== NULL
)
174 handler
->mmap
= process_event_stub
;
175 if (handler
->comm
== NULL
)
176 handler
->comm
= process_event_stub
;
177 if (handler
->fork
== NULL
)
178 handler
->fork
= process_event_stub
;
179 if (handler
->exit
== NULL
)
180 handler
->exit
= process_event_stub
;
181 if (handler
->lost
== NULL
)
182 handler
->lost
= process_event_stub
;
183 if (handler
->read
== NULL
)
184 handler
->read
= process_event_stub
;
185 if (handler
->throttle
== NULL
)
186 handler
->throttle
= process_event_stub
;
187 if (handler
->unthrottle
== NULL
)
188 handler
->unthrottle
= process_event_stub
;
191 static const char *event__name
[] = {
193 [PERF_RECORD_MMAP
] = "MMAP",
194 [PERF_RECORD_LOST
] = "LOST",
195 [PERF_RECORD_COMM
] = "COMM",
196 [PERF_RECORD_EXIT
] = "EXIT",
197 [PERF_RECORD_THROTTLE
] = "THROTTLE",
198 [PERF_RECORD_UNTHROTTLE
] = "UNTHROTTLE",
199 [PERF_RECORD_FORK
] = "FORK",
200 [PERF_RECORD_READ
] = "READ",
201 [PERF_RECORD_SAMPLE
] = "SAMPLE",
/*
 * Per-type running totals of events seen, indexed by PERF_RECORD_*;
 * bumped in perf_session__process_event(), printed by
 * event__print_totals().
 */
unsigned long event__total[PERF_RECORD_MAX];
/*
 * Print the per-type event counters accumulated in event__total[],
 * one line per PERF_RECORD_* type.
 *
 * NOTE(review): the declaration of 'i' and the function braces are
 * missing from this excerpt.
 */
void event__print_totals(void)
        for (i = 0; i < PERF_RECORD_MAX; ++i)
                pr_info("%10s events: %10ld\n",
                        event__name[i], event__total[i]);
/*
 * Byte-swap a memory region in u64-sized chunks; used via
 * event__all64_swap() when the data file comes from a host of the
 * opposite endianness.
 *
 * NOTE(review): the excerpt is missing the cursor variable and the
 * per-chunk swap/advance statements inside the loop.
 */
void mem_bswap_64(void *src, int byte_size)
        while (byte_size > 0) {
                byte_size -= sizeof(u64);
225 static void event__all64_swap(event_t
*self
)
227 struct perf_event_header
*hdr
= &self
->header
;
228 mem_bswap_64(hdr
+ 1, self
->header
.size
- sizeof(*hdr
));
231 static void event__comm_swap(event_t
*self
)
233 self
->comm
.pid
= bswap_32(self
->comm
.pid
);
234 self
->comm
.tid
= bswap_32(self
->comm
.tid
);
237 static void event__mmap_swap(event_t
*self
)
239 self
->mmap
.pid
= bswap_32(self
->mmap
.pid
);
240 self
->mmap
.tid
= bswap_32(self
->mmap
.tid
);
241 self
->mmap
.start
= bswap_64(self
->mmap
.start
);
242 self
->mmap
.len
= bswap_64(self
->mmap
.len
);
243 self
->mmap
.pgoff
= bswap_64(self
->mmap
.pgoff
);
246 static void event__task_swap(event_t
*self
)
248 self
->fork
.pid
= bswap_32(self
->fork
.pid
);
249 self
->fork
.tid
= bswap_32(self
->fork
.tid
);
250 self
->fork
.ppid
= bswap_32(self
->fork
.ppid
);
251 self
->fork
.ptid
= bswap_32(self
->fork
.ptid
);
252 self
->fork
.time
= bswap_64(self
->fork
.time
);
255 static void event__read_swap(event_t
*self
)
257 self
->read
.pid
= bswap_32(self
->read
.pid
);
258 self
->read
.tid
= bswap_32(self
->read
.tid
);
259 self
->read
.value
= bswap_64(self
->read
.value
);
260 self
->read
.time_enabled
= bswap_64(self
->read
.time_enabled
);
261 self
->read
.time_running
= bswap_64(self
->read
.time_running
);
262 self
->read
.id
= bswap_64(self
->read
.id
);
/* Signature of a per-record-type byte-swap handler (see event__swap_ops[]). */
typedef void (*event__swap_op)(event_t *self);
267 static event__swap_op event__swap_ops
[] = {
268 [PERF_RECORD_MMAP
] = event__mmap_swap
,
269 [PERF_RECORD_COMM
] = event__comm_swap
,
270 [PERF_RECORD_FORK
] = event__task_swap
,
271 [PERF_RECORD_EXIT
] = event__task_swap
,
272 [PERF_RECORD_LOST
] = event__all64_swap
,
273 [PERF_RECORD_READ
] = event__read_swap
,
274 [PERF_RECORD_SAMPLE
] = event__all64_swap
,
275 [PERF_RECORD_MAX
] = NULL
,
/*
 * Dispatch one event to the matching ops-> callback, after bumping the
 * per-type totals and, for cross-endian files, byte-swapping the
 * record in place.  Types >= PERF_RECORD_MAX fall through to the
 * unknown_events counter.
 *
 * NOTE(review): incomplete excerpt -- the 'event' parameter line, the
 * dump-mode guard, the switch default case and the braces are missing
 * from the visible text.
 */
static int perf_session__process_event(struct perf_session *self,
                                       struct perf_event_ops *ops,
                                       u64 offset, u64 head)
        if (event->header.type < PERF_RECORD_MAX) {
                dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
                            offset + head, event->header.size,
                            event__name[event->header.type]);
                ++event__total[event->header.type];

        /* cross-endian file: swap this record if a handler exists for its type */
        if (self->header.needs_swap && event__swap_ops[event->header.type])
                event__swap_ops[event->header.type](event);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                return ops->sample(event, self);
        case PERF_RECORD_MMAP:
                return ops->mmap(event, self);
        case PERF_RECORD_COMM:
                return ops->comm(event, self);
        case PERF_RECORD_FORK:
                return ops->fork(event, self);
        case PERF_RECORD_EXIT:
                return ops->exit(event, self);
        case PERF_RECORD_LOST:
                return ops->lost(event, self);
        case PERF_RECORD_READ:
                return ops->read(event, self);
        case PERF_RECORD_THROTTLE:
                return ops->throttle(event, self);
        case PERF_RECORD_UNTHROTTLE:
                return ops->unthrottle(event, self);

        /* not a type we know how to dispatch */
        self->unknown_events++;
321 void perf_event_header__bswap(struct perf_event_header
*self
)
323 self
->type
= bswap_32(self
->type
);
324 self
->misc
= bswap_16(self
->misc
);
325 self
->size
= bswap_16(self
->size
);
/*
 * Read the build-id table from fd 'input': a sequence of
 * build_id_event records, each followed by the DSO's filename, lying
 * between 'offset' and 'offset + size'.  Each build id is attached to
 * the matching DSO on the kernel or user DSO list.
 *
 * NOTE(review): incomplete excerpt -- the 'len' and 'dso' declarations,
 * error returns, the branch framing after __dsos__findnew() and the
 * loop/function braces are missing from the visible text.
 */
int perf_header__read_build_ids(struct perf_header *self,
                                int input, u64 offset, u64 size)
        struct build_id_event bev;
        char filename[PATH_MAX];
        u64 limit = offset + size;

        while (offset < limit) {
                struct list_head *head = &dsos__user;

                if (read(input, &bev, sizeof(bev)) != sizeof(bev))

                if (self->needs_swap)
                        perf_event_header__bswap(&bev.header);

                /* the filename occupies the rest of the record */
                len = bev.header.size - sizeof(bev);
                if (read(input, filename, len) != len)

                /* kernel DSOs live on their own list */
                if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
                        head = &dsos__kernel;

                dso = __dsos__findnew(head, filename);
                        dso__set_build_id(dso, &bev.build_id);
                        /* special-cased: bracketed names like "[kernel.kallsyms]" */
                        if (head == &dsos__kernel && filename[0] == '[')

                offset += bev.header.size;
/*
 * Make sure pid 0 exists in the session's thread tree, named
 * "swapper" (the idle task), so events against it resolve.
 *
 * NOTE(review): the return statements are missing from this excerpt.
 */
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
        struct thread *thread = perf_session__findnew(self, 0);

        if (thread == NULL || thread__set_comm(thread, "swapper")) {
                pr_err("problem inserting idle task.\n");
/*
 * Main read loop: mmap the data file a window at a time
 * (mmap_window pages), walk the records and hand each one to
 * perf_session__process_event().  The window is slid forward to a
 * page boundary whenever the next record would cross its end.  For
 * cross-endian files the window is mapped MAP_PRIVATE with PROT_WRITE
 * so records can be swapped in place without touching the file.
 *
 * NOTE(review): heavily abridged excerpt -- declarations (head, shift,
 * offset, buf, bf, event, size, munmap_ret), error labels/returns, the
 * remap/retry control flow and most braces are missing; several lines
 * below are fragments of larger statements.
 */
int perf_session__process_events(struct perf_session *self,
                                 struct perf_event_ops *ops)
        int err, mmap_prot, mmap_flags;

        if (perf_session__register_idle_thread(self) == NULL)

        /* replace any NULL callbacks with the no-op stub */
        perf_event_ops__fill_defaults(ops);

        page_size = sysconf(_SC_PAGESIZE);

        /* start reading at the first record past the file header */
        head = self->header.data_offset;

        /* record the cwd so relative paths in events can be resolved */
        if (!symbol_conf.full_paths) {
                if (getcwd(bf, sizeof(bf)) == NULL) {
                        pr_err("failed to get the current directory\n");
                self->cwd = strdup(bf);
                if (self->cwd == NULL) {
                self->cwdlen = strlen(self->cwd);

        /* align the mapping offset down to a page boundary */
        shift = page_size * (head / page_size);

        mmap_prot = PROT_READ;
        mmap_flags = MAP_SHARED;

        /* swapping happens in place, so the mapping must be writable+private */
        if (self->header.needs_swap) {
                mmap_prot |= PROT_WRITE;
                mmap_flags = MAP_PRIVATE;

        buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
                   mmap_flags, self->fd, offset);
        if (buf == MAP_FAILED) {
                pr_err("failed to mmap file\n");

        event = (event_t *)(buf + head);

        if (self->header.needs_swap)
                perf_event_header__bswap(&event->header);
        size = event->header.size;

        /* record would cross the window: unmap and slide the window forward */
        if (head + event->header.size >= page_size * self->mmap_window) {
                shift = page_size * (head / page_size);
                munmap_ret = munmap(buf, page_size * self->mmap_window);
                assert(munmap_ret == 0);

        size = event->header.size;

        dump_printf("\n%#Lx [%#x]: event: %d\n",
                    offset + head, event->header.size, event->header.type);

                /* fragment: tail of the size-check || process-event condition */
                perf_session__process_event(self, event, ops, offset, head) < 0) {
                dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
                            offset + head, event->header.size,
                /*
                 * assume we lost track of the stream, check alignment, and
                 * increment a single u64 in the hope to catch on again 'soon'.
                 */
                if (unlikely(head & 7))

        /* stop once past the data section recorded in the header */
        if (offset + head >= self->header.data_offset + self->header.data_size)

        if (offset + head < self->size)
/*
 * True when the session's samples carry raw trace data
 * (PERF_SAMPLE_RAW); otherwise complain, naming the 'perf <msg>'
 * command that would have recorded it.
 *
 * NOTE(review): the return statements are missing from this excerpt.
 */
bool perf_session__has_traces(struct perf_session *self, const char *msg)
        if (!(self->sample_type & PERF_SAMPLE_RAW)) {
                pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
/*
 * Record the reference symbol (a strdup'ed copy of its name, plus its
 * address) used later by perf_session__reloc_vmlinux_maps() to compute
 * the vmlinux relocation offset.
 *
 * NOTE(review): incomplete excerpt -- the address parameter line, the
 * 'bracket' declaration and stripping logic after strchr(), and the
 * returns are missing from the visible text.
 */
int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
                                             const char *symbol_name,

        self->ref_reloc_sym.name = strdup(symbol_name);
        if (self->ref_reloc_sym.name == NULL)

        /* names may carry a trailing bracketed suffix, e.g. "_text]" forms */
        bracket = strchr(self->ref_reloc_sym.name, ']');

        self->ref_reloc_sym.addr = addr;
520 static u64
map__reloc_map_ip(struct map
*map
, u64 ip
)
522 return ip
+ (s64
)map
->pgoff
;
525 static u64
map__reloc_unmap_ip(struct map
*map
, u64 ip
)
527 return ip
- (s64
)map
->pgoff
;
/*
 * Given the unrelocated address of the reference symbol (set by
 * perf_session__set_kallsyms_ref_reloc_sym()), compute the relocation
 * delta and install the relocation-aware map_ip/unmap_ip helpers on
 * every vmlinux map.
 *
 * NOTE(review): the excerpt is cut short -- the 'type' declaration,
 * any early return for a zero reloc, the per-map pgoff assignment (if
 * any) and the closing braces are not visible here.
 */
void perf_session__reloc_vmlinux_maps(struct perf_session *self,
                                      u64 unrelocated_addr)

        s64 reloc = unrelocated_addr - self->ref_reloc_sym.addr;

        for (type = 0; type < MAP__NR_TYPES; ++type) {
                struct map *map = self->vmlinux_maps[type];

                map->map_ip = map__reloc_map_ip;
                map->unmap_ip = map__reloc_unmap_ip;