#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

/* Headers restored for the code below (the original include list was elided): */
#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(&self->header, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}
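
/*
 * Note: perf_session__open() only validates the input: the file must open,
 * be owned by the current user or root (unless 'force' is given), be
 * non-empty and carry a header that perf_header__read() accepts. The actual
 * event data is mapped and parsed later, in __perf_session__process_events().
 */
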
static inline int perf_session__create_kernel_maps(struct perf_session *self)
{
	return map_groups__create_kernel_maps(&self->kmaps, self->vmlinux_maps);
}

struct perf_session *perf_session__new(const char *filename, int mode, bool force)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->stats_by_id = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->unknown_events = 0;
	map_groups__init(&self->kmaps);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	self->sample_type = perf_header__sample_type(&self->header);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}
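
/*
 * Typical lifecycle, as a sketch (the actual call sites live in the perf
 * builtins, not in this file): a reporting tool creates a read-only session,
 * processes the events with its own perf_event_ops and deletes the session:
 *
 *	struct perf_session *session = perf_session__new("perf.data",
 *							  O_RDONLY, false);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	err = perf_session__process_events(session, &my_event_ops);
 *	perf_session__delete(session);
 *
 * 'my_event_ops' is a hypothetical handler table; see
 * perf_event_ops__fill_defaults() below for the callbacks it may provide.
 */
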
void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

struct symbol **perf_session__resolve_callchain(struct perf_session *self,
						struct thread *thread,
						struct ip_callchain *chain,
						struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct symbol **syms = NULL;
	unsigned int i;

	if (symbol_conf.use_callchain) {
		syms = calloc(chain->nr, sizeof(*syms));
		if (!syms) {
			fprintf(stderr, "Can't allocate memory for symbols\n");
			exit(-1);
		}
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i] = al.sym;
		}
	}

	return syms;
}
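
/*
 * The ips[] array mixes real addresses with PERF_CONTEXT_* markers: values
 * at or above PERF_CONTEXT_MAX are not addresses but sentinels that switch
 * the cpumode (hypervisor/kernel/user) used to resolve the entries that
 * follow them, which is why the loop above handles them in a switch and
 * then skips to the next entry.
 */
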
static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
}
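
/*
 * Filling every unset callback with process_event_stub() lets
 * perf_session__process_event() dispatch through ops->... unconditionally,
 * without per-type NULL checks; unhandled events are simply reported by the
 * stub via dump_printf().
 */
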
static const char *event__name[] = {
	[0]			 = "TOTAL",
	[PERF_RECORD_MMAP]	 = "MMAP",
	[PERF_RECORD_LOST]	 = "LOST",
	[PERF_RECORD_COMM]	 = "COMM",
	[PERF_RECORD_EXIT]	 = "EXIT",
	[PERF_RECORD_THROTTLE]	 = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]	 = "FORK",
	[PERF_RECORD_READ]	 = "READ",
	[PERF_RECORD_SAMPLE]	 = "SAMPLE",
};

unsigned long event__total[PERF_RECORD_MAX];

void event__print_totals(void)
{
	int i;

	for (i = 0; i < PERF_RECORD_MAX; ++i)
		pr_info("%10s events: %10ld\n",
			event__name[i], event__total[i]);
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}
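
/*
 * mem_bswap_64() assumes the buffer is a plain array of u64 values, so it
 * is only used for event types whose payload is entirely 64-bit words (see
 * event__all64_swap() below); events with mixed 32/64-bit fields get their
 * own field-by-field swap routines.
 */
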
static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid	= bswap_32(self->fork.ppid);
	self->fork.ptid	= bswap_32(self->fork.ptid);
	self->fork.time	= bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled	= bswap_64(self->read.time_enabled);
	self->read.time_running	= bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_MAX]    = NULL,
};
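
/*
 * event__swap_ops[] is indexed by perf_event_header.type, so when the file
 * was recorded on a host with the opposite endianness (header.needs_swap)
 * perf_session__process_event() can byte-swap each record with a single
 * table lookup before handing it to the ops callbacks.
 */
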
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	if (event->header.type < PERF_RECORD_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		++event__total[0];
		++event__total[event->header.type];
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return ops->sample(event, self);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	default:
		self->unknown_events++;
		return -1;
	}
}
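
/*
 * Each case returns whatever the tool's handler returns, so a negative
 * value from an ops callback propagates to __perf_session__process_events(),
 * which then treats the record as unparseable and resynchronizes. Types
 * outside the known range only bump self->unknown_events.
 */
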
void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

int perf_header__read_build_ids(struct perf_header *self,
				int input, u64 offset, u64 size)
{
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;
	int err = -1;

	while (offset < limit) {
		struct dso *dso;
		ssize_t len;
		struct list_head *head = &dsos__user;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (self->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;

		if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
			head = &dsos__kernel;

		dso = __dsos__findnew(head, filename);
		if (dso != NULL) {
			dso__set_build_id(dso, &bev.build_id);
			if (head == &dsos__kernel && filename[0] == '[')
				dso->kernel = 1;
		}

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
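
/*
 * On-disk layout handled above: the build_ids section is a sequence of
 * struct build_id_event records, each followed by the DSO file name, with
 * bev.header.size giving the total record length, which is why the loop
 * advances 'offset' by that size rather than by what was read.
 */
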
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
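
/*
 * Pid 0 is not announced by COMM/FORK events, so the "swapper" (idle) thread
 * is registered up front; samples that hit the idle task then have a
 * struct thread to be attributed to instead of being dropped.
 */
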
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
more:
	event = (event_t *)(buf + head);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
out_err:
	return err;
}
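
/*
 * The reader above works on a sliding window: only page_size *
 * self->mmap_window bytes of the file are mapped at a time, and when the
 * next event would cross the end of the window the mapping is dropped and
 * re-established at a page-aligned offset ('shift') closer to the current
 * head. Cross-endian files are mapped MAP_PRIVATE/PROT_WRITE so the
 * byte-swapping can be done in place without touching the file.
 */
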
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	err = __perf_session__process_events(self, self->header.data_offset,
					     self->header.data_size,
					     self->size, ops);
out_err:
	return err;
}
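
/*
 * A minimal handler table, as a sketch (my_process_sample/my_process_mmap
 * are hypothetical handlers with the process_event_stub() signature, not
 * part of this file): a tool that only cares about samples and mmaps could
 * do roughly
 *
 *	static struct perf_event_ops my_ops = {
 *		.sample	= my_process_sample,
 *		.mmap	= my_process_mmap,
 *	};
 *	...
 *	err = perf_session__process_events(session, &my_ops);
 *
 * Every callback left NULL is filled with the stub before processing starts.
 */
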
bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;

	self->ref_reloc_sym.name = strdup(symbol_name);
	if (self->ref_reloc_sym.name == NULL)
		return -ENOMEM;

	bracket = strchr(self->ref_reloc_sym.name, ']');
	if (bracket)
		*++bracket = '\0';

	self->ref_reloc_sym.addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(self->vmlinux_maps[i]);
		kmap->ref_reloc_sym = &self->ref_reloc_sym;
	}

	return 0;
}
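
/*
 * The ref_reloc_sym recorded here is a symbol (typically taken from
 * /proc/kallsyms) whose runtime address is known; once the same symbol is
 * later found in vmlinux, the difference between the two addresses gives
 * the relocation offset applied in map__reloc_vmlinux() below.
 */
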
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}

void map__reloc_vmlinux(struct map *self)
{
	struct kmap *kmap = map__kmap(self);
	s64 reloc;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
		return;

	reloc = (kmap->ref_reloc_sym->unrelocated_addr -
		 kmap->ref_reloc_sym->addr);

	if (!reloc)
		return;

	self->map_ip   = map__reloc_map_ip;
	self->unmap_ip = map__reloc_unmap_ip;
	self->pgoff    = reloc;
}
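
/*
 * After relocation the map's pgoff holds the kallsyms-vs-vmlinux delta, so
 * map__reloc_map_ip()/map__reloc_unmap_ip() translate between file and
 * runtime addresses by simply adding or subtracting it.
 */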