1 #define _FILE_OFFSET_BITS 64
3 #include <linux/kernel.h>
/*
 * perf_session__open - open the session's data file (self->filename) for
 * reading, sanity-check it and record its size in self->size.
 * @force: skip the file-ownership check when true.
 *
 * NOTE(review): this extract is garbled/truncated -- braces, error paths
 * and return statements from the original source are missing.  Only
 * comments were added here; the code text is untouched.
 */
13 static int perf_session__open(struct perf_session
*self
, bool force
)
15 struct stat input_stat
;
/* open the data file read-only */
17 self
->fd
= open(self
->filename
, O_RDONLY
);
/* open-failure path: hint at 'perf record' for the default filename */
19 pr_err("failed to open file: %s", self
->filename
);
20 if (!strcmp(self
->filename
, "perf.data"))
21 pr_err(" (try 'perf record' first)");
26 if (fstat(self
->fd
, &input_stat
) < 0)
/* refuse files owned by another non-root user unless --force'd */
29 if (!force
&& input_stat
.st_uid
&& (input_stat
.st_uid
!= geteuid())) {
30 pr_err("file %s not owned by current user or root\n",
/* an empty file has nothing to process */
35 if (!input_stat
.st_size
) {
36 pr_info("zero-sized file (%s), nothing to do!\n",
/* parse and validate the perf file header */
41 if (perf_header__read(&self
->header
, self
->fd
) < 0) {
42 pr_err("incompatible file format");
/* remember the total file size for the mmap-based reader */
46 self
->size
= input_stat
.st_size
;
55 static inline int perf_session__create_kernel_maps(struct perf_session
*self
)
57 return map_groups__create_kernel_maps(&self
->kmaps
, self
->vmlinux_maps
);
/*
 * perf_session__new - allocate and initialise a perf_session.
 * @filename: data file name, copied into trailing storage of the struct.
 * @mode: O_RDONLY to read an existing file, O_WRONLY to create one.
 * @force: passed to perf_session__open() to bypass the ownership check.
 *
 * NOTE(review): truncated extract -- error labels, gotos and the final
 * returns are missing; comments only were added, code text untouched.
 */
60 struct perf_session
*perf_session__new(const char *filename
, int mode
, bool force
)
/* allocate the session plus room for a trailing copy of @filename */
62 size_t len
= filename
? strlen(filename
) + 1 : 0;
63 struct perf_session
*self
= zalloc(sizeof(*self
) + len
);
68 if (perf_header__init(&self
->header
) < 0)
71 memcpy(self
->filename
, filename
, len
);
/* empty thread rb-tree, no cached lookup yet */
72 self
->threads
= RB_ROOT
;
73 self
->last_match
= NULL
;
/* mmap window in pages used by the event reader */
74 self
->mmap_window
= 32;
77 self
->unknown_events
= 0;
78 map_groups__init(&self
->kmaps
);
/* read mode: open and validate the existing file */
80 if (mode
== O_RDONLY
) {
81 if (perf_session__open(self
, force
) < 0)
83 } else if (mode
== O_WRONLY
) {
85 * In O_RDONLY mode this will be performed when reading the
86 * kernel MMAP event, in event__process_mmap().
88 if (perf_session__create_kernel_maps(self
) < 0)
/* cache the sample type advertised by the file header */
92 self
->sample_type
= perf_header__sample_type(&self
->header
);
/* error path: tear down the partially constructed session */
99 perf_session__delete(self
);
/*
 * perf_session__delete - destructor for a perf_session.
 * NOTE(review): truncated extract -- the original presumably also closes
 * the fd and frees the session; only the header teardown is visible here.
 */
103 void perf_session__delete(struct perf_session
*self
)
105 perf_header__exit(&self
->header
);
111 static bool symbol__match_parent_regex(struct symbol
*sym
)
113 if (sym
->name
&& !regexec(&parent_regex
, sym
->name
, 0, NULL
, 0))
/*
 * perf_session__resolve_callchain - resolve every IP in @chain to a symbol.
 * @parent: out-parameter; set to the first symbol matching the user's
 *          parent regex when parent sorting is enabled (sort__has_parent).
 * Returns a calloc'ed array of chain->nr symbol pointers when callchain
 * accumulation is enabled (symbol_conf.use_callchain), NULL otherwise.
 * Caller owns the returned array.
 *
 * NOTE(review): truncated extract -- braces, several returns and parts of
 * the loop body are missing; comments only were added.
 */
119 struct symbol
**perf_session__resolve_callchain(struct perf_session
*self
,
120 struct thread
*thread
,
121 struct ip_callchain
*chain
,
122 struct symbol
**parent
)
/* default context until a PERF_CONTEXT_* marker says otherwise */
124 u8 cpumode
= PERF_RECORD_MISC_USER
;
125 struct symbol
**syms
= NULL
;
128 if (symbol_conf
.use_callchain
) {
129 syms
= calloc(chain
->nr
, sizeof(*syms
));
131 fprintf(stderr
, "Can't allocate memory for symbols\n");
136 for (i
= 0; i
< chain
->nr
; i
++) {
137 u64 ip
= chain
->ips
[i
];
138 struct addr_location al
;
/* values >= PERF_CONTEXT_MAX are context markers, not real IPs */
140 if (ip
>= PERF_CONTEXT_MAX
) {
142 case PERF_CONTEXT_HV
:
143 cpumode
= PERF_RECORD_MISC_HYPERVISOR
; break;
144 case PERF_CONTEXT_KERNEL
:
145 cpumode
= PERF_RECORD_MISC_KERNEL
; break;
146 case PERF_CONTEXT_USER
:
147 cpumode
= PERF_RECORD_MISC_USER
; break;
/* map the IP to a symbol in the current cpumode's address space */
154 thread__find_addr_location(thread
, self
, cpumode
,
155 MAP__FUNCTION
, ip
, &al
, NULL
);
156 if (al
.sym
!= NULL
) {
/* record the first parent-regex match, once */
157 if (sort__has_parent
&& !*parent
&&
158 symbol__match_parent_regex(al
.sym
))
160 if (!symbol_conf
.use_callchain
)
169 static int process_event_stub(event_t
*event __used
,
170 struct perf_session
*session __used
)
172 dump_printf(": unhandled!\n");
176 static void perf_event_ops__fill_defaults(struct perf_event_ops
*handler
)
178 if (handler
->sample
== NULL
)
179 handler
->sample
= process_event_stub
;
180 if (handler
->mmap
== NULL
)
181 handler
->mmap
= process_event_stub
;
182 if (handler
->comm
== NULL
)
183 handler
->comm
= process_event_stub
;
184 if (handler
->fork
== NULL
)
185 handler
->fork
= process_event_stub
;
186 if (handler
->exit
== NULL
)
187 handler
->exit
= process_event_stub
;
188 if (handler
->lost
== NULL
)
189 handler
->lost
= process_event_stub
;
190 if (handler
->read
== NULL
)
191 handler
->read
= process_event_stub
;
192 if (handler
->throttle
== NULL
)
193 handler
->throttle
= process_event_stub
;
194 if (handler
->unthrottle
== NULL
)
195 handler
->unthrottle
= process_event_stub
;
/*
 * event__name - human-readable names of the PERF_RECORD_* event types,
 * indexed by the type value; used by event__print_totals() and the
 * dump-mode trace output.
 * NOTE(review): extract appears truncated (no closing brace; earlier
 * entries may be missing); code text untouched, comments only added.
 */
198 static const char *event__name
[] = {
200 [PERF_RECORD_MMAP
] = "MMAP",
201 [PERF_RECORD_LOST
] = "LOST",
202 [PERF_RECORD_COMM
] = "COMM",
203 [PERF_RECORD_EXIT
] = "EXIT",
204 [PERF_RECORD_THROTTLE
] = "THROTTLE",
205 [PERF_RECORD_UNTHROTTLE
] = "UNTHROTTLE",
206 [PERF_RECORD_FORK
] = "FORK",
207 [PERF_RECORD_READ
] = "READ",
208 [PERF_RECORD_SAMPLE
] = "SAMPLE",
/* per-event-type counters, bumped as events are dispatched;
 * reported by event__print_totals() */
211 unsigned long event__total
[PERF_RECORD_MAX
];
/*
 * event__print_totals - print the per-type event counters accumulated in
 * event__total[], one line per PERF_RECORD_* type.
 * NOTE(review): truncated extract -- the loop variable's declaration and
 * braces are missing; comments only were added.
 */
213 void event__print_totals(void)
216 for (i
= 0; i
< PERF_RECORD_MAX
; ++i
)
217 pr_info("%10s events: %10ld\n",
218 event__name
[i
], event__total
[i
]);
/*
 * mem_bswap_64 - byte-swap a memory region as a sequence of u64 words
 * (used when the data file was recorded on a host of opposite endianness).
 * @byte_size is consumed in sizeof(u64) steps.
 * NOTE(review): truncated extract -- the cursor declaration and the
 * per-word swap inside the loop are missing; comments only were added.
 */
221 void mem_bswap_64(void *src
, int byte_size
)
225 while (byte_size
> 0) {
227 byte_size
-= sizeof(u64
);
232 static void event__all64_swap(event_t
*self
)
234 struct perf_event_header
*hdr
= &self
->header
;
235 mem_bswap_64(hdr
+ 1, self
->header
.size
- sizeof(*hdr
));
238 static void event__comm_swap(event_t
*self
)
240 self
->comm
.pid
= bswap_32(self
->comm
.pid
);
241 self
->comm
.tid
= bswap_32(self
->comm
.tid
);
244 static void event__mmap_swap(event_t
*self
)
246 self
->mmap
.pid
= bswap_32(self
->mmap
.pid
);
247 self
->mmap
.tid
= bswap_32(self
->mmap
.tid
);
248 self
->mmap
.start
= bswap_64(self
->mmap
.start
);
249 self
->mmap
.len
= bswap_64(self
->mmap
.len
);
250 self
->mmap
.pgoff
= bswap_64(self
->mmap
.pgoff
);
253 static void event__task_swap(event_t
*self
)
255 self
->fork
.pid
= bswap_32(self
->fork
.pid
);
256 self
->fork
.tid
= bswap_32(self
->fork
.tid
);
257 self
->fork
.ppid
= bswap_32(self
->fork
.ppid
);
258 self
->fork
.ptid
= bswap_32(self
->fork
.ptid
);
259 self
->fork
.time
= bswap_64(self
->fork
.time
);
262 static void event__read_swap(event_t
*self
)
264 self
->read
.pid
= bswap_32(self
->read
.pid
);
265 self
->read
.tid
= bswap_32(self
->read
.tid
);
266 self
->read
.value
= bswap_64(self
->read
.value
);
267 self
->read
.time_enabled
= bswap_64(self
->read
.time_enabled
);
268 self
->read
.time_running
= bswap_64(self
->read
.time_running
);
269 self
->read
.id
= bswap_64(self
->read
.id
);
/* signature of a per-event-type byte-swap routine (see event__swap_ops) */
272 typedef void (*event__swap_op
)(event_t
*self
);
/*
 * event__swap_ops - byte-swap routine per PERF_RECORD_* type, indexed by
 * event type; a NULL entry means no payload swap is needed/known.
 * FORK and EXIT share event__task_swap; LOST and SAMPLE payloads are
 * pure u64 streams, hence event__all64_swap.
 */
274 static event__swap_op event__swap_ops
[] = {
275 [PERF_RECORD_MMAP
] = event__mmap_swap
,
276 [PERF_RECORD_COMM
] = event__comm_swap
,
277 [PERF_RECORD_FORK
] = event__task_swap
,
278 [PERF_RECORD_EXIT
] = event__task_swap
,
279 [PERF_RECORD_LOST
] = event__all64_swap
,
280 [PERF_RECORD_READ
] = event__read_swap
,
281 [PERF_RECORD_SAMPLE
] = event__all64_swap
,
282 [PERF_RECORD_MAX
] = NULL
,
/*
 * perf_session__process_event - dispatch one event to the matching
 * callback in @ops after optional endianness correction.  @offset/@head
 * locate the event in the file, for diagnostics only.
 * NOTE(review): truncated extract -- the event parameter's declaration
 * line, braces and the default/unknown-type return path are missing;
 * comments only were added, code text untouched.
 */
285 static int perf_session__process_event(struct perf_session
*self
,
287 struct perf_event_ops
*ops
,
288 u64 offset
, u64 head
)
/* known type: log it (dump mode) and bump the per-type counter */
292 if (event
->header
.type
< PERF_RECORD_MAX
) {
293 dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
294 offset
+ head
, event
->header
.size
,
295 event__name
[event
->header
.type
]);
297 ++event__total
[event
->header
.type
];
/* cross-endian file: swap this event's payload in place */
300 if (self
->header
.needs_swap
&& event__swap_ops
[event
->header
.type
])
301 event__swap_ops
[event
->header
.type
](event
);
/* hand the event to the type-specific callback (never NULL, see
 * perf_event_ops__fill_defaults) */
303 switch (event
->header
.type
) {
304 case PERF_RECORD_SAMPLE
:
305 return ops
->sample(event
, self
);
306 case PERF_RECORD_MMAP
:
307 return ops
->mmap(event
, self
);
308 case PERF_RECORD_COMM
:
309 return ops
->comm(event
, self
);
310 case PERF_RECORD_FORK
:
311 return ops
->fork(event
, self
);
312 case PERF_RECORD_EXIT
:
313 return ops
->exit(event
, self
);
314 case PERF_RECORD_LOST
:
315 return ops
->lost(event
, self
);
316 case PERF_RECORD_READ
:
317 return ops
->read(event
, self
);
318 case PERF_RECORD_THROTTLE
:
319 return ops
->throttle(event
, self
);
320 case PERF_RECORD_UNTHROTTLE
:
321 return ops
->unthrottle(event
, self
);
/* unrecognized type: count it so the caller can report the loss */
323 self
->unknown_events
++;
328 void perf_event_header__bswap(struct perf_event_header
*self
)
330 self
->type
= bswap_32(self
->type
);
331 self
->misc
= bswap_16(self
->misc
);
332 self
->size
= bswap_16(self
->size
);
/*
 * perf_header__read_build_ids - read the build-id table section of the
 * file header from fd @input: a sequence of build_id_event records each
 * followed by a dso filename, spanning [@offset, @offset + @size).
 * Registers each build-id with the kernel or userland dso list.
 * NOTE(review): truncated extract -- local declarations (dso, len, err),
 * braces and error/return paths are missing; comments only were added.
 */
335 int perf_header__read_build_ids(struct perf_header
*self
,
336 int input
, u64 offset
, u64 size
)
338 struct build_id_event bev
;
339 char filename
[PATH_MAX
];
340 u64 limit
= offset
+ size
;
343 while (offset
< limit
) {
/* default owner list is userland dsos; may switch to kernel below */
346 struct list_head
*head
= &dsos__user
;
348 if (read(input
, &bev
, sizeof(bev
)) != sizeof(bev
))
/* cross-endian file: fix the record header before using .size/.misc */
351 if (self
->needs_swap
)
352 perf_event_header__bswap(&bev
.header
);
/* the dso filename follows the fixed-size record */
354 len
= bev
.header
.size
- sizeof(bev
);
355 if (read(input
, filename
, len
) != len
)
358 if (bev
.header
.misc
& PERF_RECORD_MISC_KERNEL
)
359 head
= &dsos__kernel
;
361 dso
= __dsos__findnew(head
, filename
);
363 dso__set_build_id(dso
, &bev
.build_id
);
/* '['-prefixed kernel names, e.g. "[kernel.kallsyms]" -- special-cased */
364 if (head
== &dsos__kernel
&& filename
[0] == '[')
/* advance to the next variable-length record */
368 offset
+= bev
.header
.size
;
/*
 * perf_session__register_idle_thread - create (or find) the pid-0 idle
 * thread and name it "swapper" so samples against pid 0 resolve.
 * NOTE(review): truncated extract -- braces and the success/failure
 * returns are missing; comments only were added.
 */
375 static struct thread
*perf_session__register_idle_thread(struct perf_session
*self
)
377 struct thread
*thread
= perf_session__findnew(self
, 0);
379 if (thread
== NULL
|| thread__set_comm(thread
, "swapper")) {
380 pr_err("problem inserting idle task.\n");
/*
 * __perf_session__process_events - core event loop: mmap a sliding window
 * of the data file and dispatch each event in [@data_offset,
 * @data_offset + @data_size) through perf_session__process_event().
 * The window is page_size * self->mmap_window bytes and is remapped
 * (shifted) whenever the next event would cross its end.
 * NOTE(review): heavily truncated extract -- several declarations (buf,
 * head, offset, size, event, shift, munmap_ret), labels, the remap goto
 * and the final return are missing; comments only were added.
 */
387 int __perf_session__process_events(struct perf_session
*self
,
388 u64 data_offset
, u64 data_size
,
389 u64 file_size
, struct perf_event_ops
*ops
)
391 int err
, mmap_prot
, mmap_flags
;
/* make sure every ops callback slot is callable */
399 perf_event_ops__fill_defaults(ops
);
401 page_size
= sysconf(_SC_PAGESIZE
);
/* align the mmap offset down to a page boundary */
404 shift
= page_size
* (head
/ page_size
);
408 mmap_prot
= PROT_READ
;
409 mmap_flags
= MAP_SHARED
;
/* byte-swapping rewrites events in place, so we need a private,
 * writable mapping for cross-endian files */
411 if (self
->header
.needs_swap
) {
412 mmap_prot
|= PROT_WRITE
;
413 mmap_flags
= MAP_PRIVATE
;
416 buf
= mmap(NULL
, page_size
* self
->mmap_window
, mmap_prot
,
417 mmap_flags
, self
->fd
, offset
);
418 if (buf
== MAP_FAILED
) {
419 pr_err("failed to mmap file\n");
/* next event starts at the current head within the window */
425 event
= (event_t
*)(buf
+ head
);
427 if (self
->header
.needs_swap
)
428 perf_event_header__bswap(&event
->header
);
429 size
= event
->header
.size
;
/* event would run past the window: slide the mapping forward */
433 if (head
+ event
->header
.size
>= page_size
* self
->mmap_window
) {
436 shift
= page_size
* (head
/ page_size
);
438 munmap_ret
= munmap(buf
, page_size
* self
->mmap_window
);
439 assert(munmap_ret
== 0);
446 size
= event
->header
.size
;
448 dump_printf("\n%#Lx [%#x]: event: %d\n",
449 offset
+ head
, event
->header
.size
, event
->header
.type
);
/* dispatch; on failure, skip the event (see comment below) */
452 perf_session__process_event(self
, event
, ops
, offset
, head
) < 0) {
453 dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
454 offset
+ head
, event
->header
.size
,
457 * assume we lost track of the stream, check alignment, and
458 * increment a single u64 in the hope to catch on again 'soon'.
460 if (unlikely(head
& 7))
/* stop once past the declared data section */
468 if (offset
+ head
>= data_offset
+ data_size
)
/* keep going while there is file left to read */
471 if (offset
+ head
< file_size
)
/*
 * perf_session__process_events - public entry point: register the idle
 * thread, capture the cwd (unless full paths were requested) and run the
 * core loop over the header-declared data section.
 * NOTE(review): truncated extract -- declarations (err, bf), braces,
 * error returns and the tail of the __perf_session__process_events()
 * call are missing; comments only were added.
 */
479 int perf_session__process_events(struct perf_session
*self
,
480 struct perf_event_ops
*ops
)
484 if (perf_session__register_idle_thread(self
) == NULL
)
/* remember the cwd so relative dso paths can be resolved later */
487 if (!symbol_conf
.full_paths
) {
490 if (getcwd(bf
, sizeof(bf
)) == NULL
) {
493 pr_err("failed to get the current directory\n");
496 self
->cwd
= strdup(bf
);
497 if (self
->cwd
== NULL
) {
501 self
->cwdlen
= strlen(self
->cwd
);
/* process exactly the region the file header declares as event data */
504 err
= __perf_session__process_events(self
, self
->header
.data_offset
,
505 self
->header
.data_size
,
511 bool perf_session__has_traces(struct perf_session
*self
, const char *msg
)
513 if (!(self
->sample_type
& PERF_SAMPLE_RAW
)) {
514 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg
);
/*
 * perf_session__set_kallsyms_ref_reloc_sym - record the reference symbol
 * (@symbol_name, presumably with its address in a further, truncated-away
 * parameter) used to detect kernel relocation, and point every vmlinux
 * map's kmap at it.
 * NOTE(review): truncated extract -- the addr parameter's declaration,
 * the bracket-stripping logic, braces and returns are missing; comments
 * only were added, code text untouched.
 */
521 int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session
*self
,
522 const char *symbol_name
,
/* keep our own copy of the symbol name */
528 self
->ref_reloc_sym
.name
= strdup(symbol_name
);
529 if (self
->ref_reloc_sym
.name
== NULL
)
/* names like "_text]" may carry a trailing bracketed suffix */
532 bracket
= strchr(self
->ref_reloc_sym
.name
, ']');
536 self
->ref_reloc_sym
.addr
= addr
;
/* share the reference symbol with every map type's kernel map */
538 for (i
= 0; i
< MAP__NR_TYPES
; ++i
) {
539 struct kmap
*kmap
= map__kmap(self
->vmlinux_maps
[i
]);
540 kmap
->ref_reloc_sym
= &self
->ref_reloc_sym
;
546 static u64
map__reloc_map_ip(struct map
*map
, u64 ip
)
548 return ip
+ (s64
)map
->pgoff
;
551 static u64
map__reloc_unmap_ip(struct map
*map
, u64 ip
)
553 return ip
- (s64
)map
->pgoff
;
/*
 * map__reloc_vmlinux - if the kernel was relocated (the reference symbol's
 * on-disk address differs from its runtime address), switch this vmlinux
 * map over to the relocation-aware ip translators.
 * NOTE(review): truncated extract -- the reloc variable's declaration,
 * early returns and the pgoff assignment are missing; comments only
 * were added, code text untouched.
 */
556 void map__reloc_vmlinux(struct map
*self
)
558 struct kmap
*kmap
= map__kmap(self
);
/* nothing to do without a resolved reference symbol */
561 if (!kmap
->ref_reloc_sym
|| !kmap
->ref_reloc_sym
->unrelocated_addr
)
/* relocation delta = on-disk address - runtime address */
564 reloc
= (kmap
->ref_reloc_sym
->unrelocated_addr
-
565 kmap
->ref_reloc_sym
->addr
);
/* install the delta-applying translators (delta lives in self->pgoff) */
570 self
->map_ip
= map__reloc_map_ip
;
571 self
->unmap_ip
= map__reloc_unmap_ip
;