1 #define _FILE_OFFSET_BITS 64
9 #include <linux/list.h>
10 #include <linux/kernel.h>
11 #include <linux/bitops.h>
12 #include <sys/utsname.h>
18 #include "trace-event.h"
24 static bool no_buildid_cache
= false;
26 static int event_count
;
27 static struct perf_trace_event_type
*events
;
29 static u32 header_argc
;
30 static const char **header_argv
;
32 int perf_header__push_event(u64 id
, const char *name
)
34 if (strlen(name
) > MAX_EVENT_NAME
)
35 pr_warning("Event %s will be truncated\n", name
);
38 events
= malloc(sizeof(struct perf_trace_event_type
));
42 struct perf_trace_event_type
*nevents
;
44 nevents
= realloc(events
, (event_count
+ 1) * sizeof(*events
));
49 memset(&events
[event_count
], 0, sizeof(struct perf_trace_event_type
));
50 events
[event_count
].event_id
= id
;
51 strncpy(events
[event_count
].name
, name
, MAX_EVENT_NAME
- 1);
56 char *perf_header__find_event(u64 id
)
59 for (i
= 0 ; i
< event_count
; i
++) {
60 if (events
[i
].event_id
== id
)
61 return events
[i
].name
;
68 * must be a numerical value to let the endianness
69 * determine the memory layout. That way we are able
70 * to detect endianness when reading the perf.data file
73 * we check for legacy (PERFFILE) format.
75 static const char *__perf_magic1
= "PERFFILE";
76 static const u64 __perf_magic2
= 0x32454c4946524550ULL
;
77 static const u64 __perf_magic2_sw
= 0x50455246494c4532ULL
;
79 #define PERF_MAGIC __perf_magic2
81 struct perf_file_attr
{
82 struct perf_event_attr attr
;
83 struct perf_file_section ids
;
86 void perf_header__set_feat(struct perf_header
*header
, int feat
)
88 set_bit(feat
, header
->adds_features
);
91 void perf_header__clear_feat(struct perf_header
*header
, int feat
)
93 clear_bit(feat
, header
->adds_features
);
96 bool perf_header__has_feat(const struct perf_header
*header
, int feat
)
98 return test_bit(feat
, header
->adds_features
);
101 static int do_write(int fd
, const void *buf
, size_t size
)
104 int ret
= write(fd
, buf
, size
);
116 #define NAME_ALIGN 64
118 static int write_padded(int fd
, const void *bf
, size_t count
,
119 size_t count_aligned
)
121 static const char zero_buf
[NAME_ALIGN
];
122 int err
= do_write(fd
, bf
, count
);
125 err
= do_write(fd
, zero_buf
, count_aligned
- count
);
130 static int do_write_string(int fd
, const char *str
)
135 olen
= strlen(str
) + 1;
136 len
= ALIGN(olen
, NAME_ALIGN
);
138 /* write len, incl. \0 */
139 ret
= do_write(fd
, &len
, sizeof(len
));
143 return write_padded(fd
, str
, olen
, len
);
146 static char *do_read_string(int fd
, struct perf_header
*ph
)
152 sz
= read(fd
, &len
, sizeof(len
));
153 if (sz
< (ssize_t
)sizeof(len
))
163 ret
= read(fd
, buf
, len
);
164 if (ret
== (ssize_t
)len
) {
166 * strings are padded by zeroes
167 * thus the actual strlen of buf
168 * may be less than len
178 perf_header__set_cmdline(int argc
, const char **argv
)
182 header_argc
= (u32
)argc
;
184 /* do not include NULL termination */
185 header_argv
= calloc(argc
, sizeof(char *));
190 * must copy argv contents because it gets moved
191 * around during option parsing
193 for (i
= 0; i
< argc
; i
++)
194 header_argv
[i
] = argv
[i
];
199 #define dsos__for_each_with_build_id(pos, head) \
200 list_for_each_entry(pos, head, node) \
201 if (!pos->has_build_id) \
205 static int __dsos__write_buildid_table(struct list_head
*head
, pid_t pid
,
210 dsos__for_each_with_build_id(pos
, head
) {
212 struct build_id_event b
;
217 len
= pos
->long_name_len
+ 1;
218 len
= ALIGN(len
, NAME_ALIGN
);
219 memset(&b
, 0, sizeof(b
));
220 memcpy(&b
.build_id
, pos
->build_id
, sizeof(pos
->build_id
));
222 b
.header
.misc
= misc
;
223 b
.header
.size
= sizeof(b
) + len
;
224 err
= do_write(fd
, &b
, sizeof(b
));
227 err
= write_padded(fd
, pos
->long_name
,
228 pos
->long_name_len
+ 1, len
);
236 static int machine__write_buildid_table(struct machine
*machine
, int fd
)
239 u16 kmisc
= PERF_RECORD_MISC_KERNEL
,
240 umisc
= PERF_RECORD_MISC_USER
;
242 if (!machine__is_host(machine
)) {
243 kmisc
= PERF_RECORD_MISC_GUEST_KERNEL
;
244 umisc
= PERF_RECORD_MISC_GUEST_USER
;
247 err
= __dsos__write_buildid_table(&machine
->kernel_dsos
, machine
->pid
,
250 err
= __dsos__write_buildid_table(&machine
->user_dsos
,
251 machine
->pid
, umisc
, fd
);
255 static int dsos__write_buildid_table(struct perf_header
*header
, int fd
)
257 struct perf_session
*session
= container_of(header
,
258 struct perf_session
, header
);
260 int err
= machine__write_buildid_table(&session
->host_machine
, fd
);
265 for (nd
= rb_first(&session
->machines
); nd
; nd
= rb_next(nd
)) {
266 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
267 err
= machine__write_buildid_table(pos
, fd
);
274 int build_id_cache__add_s(const char *sbuild_id
, const char *debugdir
,
275 const char *name
, bool is_kallsyms
)
277 const size_t size
= PATH_MAX
;
278 char *realname
, *filename
= zalloc(size
),
279 *linkname
= zalloc(size
), *targetname
;
283 if (symbol_conf
.kptr_restrict
) {
284 pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
287 realname
= (char *)name
;
289 realname
= realpath(name
, NULL
);
291 if (realname
== NULL
|| filename
== NULL
|| linkname
== NULL
)
294 len
= scnprintf(filename
, size
, "%s%s%s",
295 debugdir
, is_kallsyms
? "/" : "", realname
);
296 if (mkdir_p(filename
, 0755))
299 snprintf(filename
+ len
, sizeof(filename
) - len
, "/%s", sbuild_id
);
301 if (access(filename
, F_OK
)) {
303 if (copyfile("/proc/kallsyms", filename
))
305 } else if (link(realname
, filename
) && copyfile(name
, filename
))
309 len
= scnprintf(linkname
, size
, "%s/.build-id/%.2s",
310 debugdir
, sbuild_id
);
312 if (access(linkname
, X_OK
) && mkdir_p(linkname
, 0755))
315 snprintf(linkname
+ len
, size
- len
, "/%s", sbuild_id
+ 2);
316 targetname
= filename
+ strlen(debugdir
) - 5;
317 memcpy(targetname
, "../..", 5);
319 if (symlink(targetname
, linkname
) == 0)
329 static int build_id_cache__add_b(const u8
*build_id
, size_t build_id_size
,
330 const char *name
, const char *debugdir
,
333 char sbuild_id
[BUILD_ID_SIZE
* 2 + 1];
335 build_id__sprintf(build_id
, build_id_size
, sbuild_id
);
337 return build_id_cache__add_s(sbuild_id
, debugdir
, name
, is_kallsyms
);
340 int build_id_cache__remove_s(const char *sbuild_id
, const char *debugdir
)
342 const size_t size
= PATH_MAX
;
343 char *filename
= zalloc(size
),
344 *linkname
= zalloc(size
);
347 if (filename
== NULL
|| linkname
== NULL
)
350 snprintf(linkname
, size
, "%s/.build-id/%.2s/%s",
351 debugdir
, sbuild_id
, sbuild_id
+ 2);
353 if (access(linkname
, F_OK
))
356 if (readlink(linkname
, filename
, size
- 1) < 0)
359 if (unlink(linkname
))
363 * Since the link is relative, we must make it absolute:
365 snprintf(linkname
, size
, "%s/.build-id/%.2s/%s",
366 debugdir
, sbuild_id
, filename
);
368 if (unlink(linkname
))
378 static int dso__cache_build_id(struct dso
*dso
, const char *debugdir
)
380 bool is_kallsyms
= dso
->kernel
&& dso
->long_name
[0] != '/';
382 return build_id_cache__add_b(dso
->build_id
, sizeof(dso
->build_id
),
383 dso
->long_name
, debugdir
, is_kallsyms
);
386 static int __dsos__cache_build_ids(struct list_head
*head
, const char *debugdir
)
391 dsos__for_each_with_build_id(pos
, head
)
392 if (dso__cache_build_id(pos
, debugdir
))
398 static int machine__cache_build_ids(struct machine
*machine
, const char *debugdir
)
400 int ret
= __dsos__cache_build_ids(&machine
->kernel_dsos
, debugdir
);
401 ret
|= __dsos__cache_build_ids(&machine
->user_dsos
, debugdir
);
405 static int perf_session__cache_build_ids(struct perf_session
*session
)
409 char debugdir
[PATH_MAX
];
411 snprintf(debugdir
, sizeof(debugdir
), "%s", buildid_dir
);
413 if (mkdir(debugdir
, 0755) != 0 && errno
!= EEXIST
)
416 ret
= machine__cache_build_ids(&session
->host_machine
, debugdir
);
418 for (nd
= rb_first(&session
->machines
); nd
; nd
= rb_next(nd
)) {
419 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
420 ret
|= machine__cache_build_ids(pos
, debugdir
);
425 static bool machine__read_build_ids(struct machine
*machine
, bool with_hits
)
427 bool ret
= __dsos__read_build_ids(&machine
->kernel_dsos
, with_hits
);
428 ret
|= __dsos__read_build_ids(&machine
->user_dsos
, with_hits
);
432 static bool perf_session__read_build_ids(struct perf_session
*session
, bool with_hits
)
435 bool ret
= machine__read_build_ids(&session
->host_machine
, with_hits
);
437 for (nd
= rb_first(&session
->machines
); nd
; nd
= rb_next(nd
)) {
438 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
439 ret
|= machine__read_build_ids(pos
, with_hits
);
445 static int write_trace_info(int fd
, struct perf_header
*h __used
,
446 struct perf_evlist
*evlist
)
448 return read_tracing_data(fd
, &evlist
->entries
);
452 static int write_build_id(int fd
, struct perf_header
*h
,
453 struct perf_evlist
*evlist __used
)
455 struct perf_session
*session
;
458 session
= container_of(h
, struct perf_session
, header
);
460 if (!perf_session__read_build_ids(session
, true))
463 err
= dsos__write_buildid_table(h
, fd
);
465 pr_debug("failed to write buildid table\n");
468 if (!no_buildid_cache
)
469 perf_session__cache_build_ids(session
);
474 static int write_hostname(int fd
, struct perf_header
*h __used
,
475 struct perf_evlist
*evlist __used
)
484 return do_write_string(fd
, uts
.nodename
);
487 static int write_osrelease(int fd
, struct perf_header
*h __used
,
488 struct perf_evlist
*evlist __used
)
497 return do_write_string(fd
, uts
.release
);
500 static int write_arch(int fd
, struct perf_header
*h __used
,
501 struct perf_evlist
*evlist __used
)
510 return do_write_string(fd
, uts
.machine
);
513 static int write_version(int fd
, struct perf_header
*h __used
,
514 struct perf_evlist
*evlist __used
)
516 return do_write_string(fd
, perf_version_string
);
519 static int write_cpudesc(int fd
, struct perf_header
*h __used
,
520 struct perf_evlist
*evlist __used
)
523 #define CPUINFO_PROC NULL
528 const char *search
= CPUINFO_PROC
;
535 file
= fopen("/proc/cpuinfo", "r");
539 while (getline(&buf
, &len
, file
) > 0) {
540 ret
= strncmp(buf
, search
, strlen(search
));
550 p
= strchr(buf
, ':');
551 if (p
&& *(p
+1) == ' ' && *(p
+2))
557 /* squash extra space characters (branding string) */
564 while (*q
&& isspace(*q
))
567 while ((*r
++ = *q
++));
571 ret
= do_write_string(fd
, s
);
578 static int write_nrcpus(int fd
, struct perf_header
*h __used
,
579 struct perf_evlist
*evlist __used
)
585 nr
= sysconf(_SC_NPROCESSORS_CONF
);
589 nrc
= (u32
)(nr
& UINT_MAX
);
591 nr
= sysconf(_SC_NPROCESSORS_ONLN
);
595 nra
= (u32
)(nr
& UINT_MAX
);
597 ret
= do_write(fd
, &nrc
, sizeof(nrc
));
601 return do_write(fd
, &nra
, sizeof(nra
));
604 static int write_event_desc(int fd
, struct perf_header
*h __used
,
605 struct perf_evlist
*evlist
)
607 struct perf_evsel
*attr
;
608 u32 nre
= 0, nri
, sz
;
611 list_for_each_entry(attr
, &evlist
->entries
, node
)
615 * write number of events
617 ret
= do_write(fd
, &nre
, sizeof(nre
));
622 * size of perf_event_attr struct
624 sz
= (u32
)sizeof(attr
->attr
);
625 ret
= do_write(fd
, &sz
, sizeof(sz
));
629 list_for_each_entry(attr
, &evlist
->entries
, node
) {
631 ret
= do_write(fd
, &attr
->attr
, sz
);
635 * write number of unique id per event
636 * there is one id per instance of an event
638 * copy into an nri to be independent of the
642 ret
= do_write(fd
, &nri
, sizeof(nri
));
647 * write event string as passed on cmdline
649 ret
= do_write_string(fd
, event_name(attr
));
653 * write unique ids for this event
655 ret
= do_write(fd
, attr
->id
, attr
->ids
* sizeof(u64
));
662 static int write_cmdline(int fd
, struct perf_header
*h __used
,
663 struct perf_evlist
*evlist __used
)
665 char buf
[MAXPATHLEN
];
671 * actual atual path to perf binary
673 sprintf(proc
, "/proc/%d/exe", getpid());
674 ret
= readlink(proc
, buf
, sizeof(buf
));
678 /* readlink() does not add null termination */
681 /* account for binary path */
684 ret
= do_write(fd
, &n
, sizeof(n
));
688 ret
= do_write_string(fd
, buf
);
692 for (i
= 0 ; i
< header_argc
; i
++) {
693 ret
= do_write_string(fd
, header_argv
[i
]);
700 #define CORE_SIB_FMT \
701 "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
702 #define THRD_SIB_FMT \
703 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
708 char **core_siblings
;
709 char **thread_siblings
;
712 static int build_cpu_topo(struct cpu_topo
*tp
, int cpu
)
715 char filename
[MAXPATHLEN
];
716 char *buf
= NULL
, *p
;
721 sprintf(filename
, CORE_SIB_FMT
, cpu
);
722 fp
= fopen(filename
, "r");
726 if (getline(&buf
, &len
, fp
) <= 0)
731 p
= strchr(buf
, '\n');
735 for (i
= 0; i
< tp
->core_sib
; i
++) {
736 if (!strcmp(buf
, tp
->core_siblings
[i
]))
739 if (i
== tp
->core_sib
) {
740 tp
->core_siblings
[i
] = buf
;
746 sprintf(filename
, THRD_SIB_FMT
, cpu
);
747 fp
= fopen(filename
, "r");
751 if (getline(&buf
, &len
, fp
) <= 0)
754 p
= strchr(buf
, '\n');
758 for (i
= 0; i
< tp
->thread_sib
; i
++) {
759 if (!strcmp(buf
, tp
->thread_siblings
[i
]))
762 if (i
== tp
->thread_sib
) {
763 tp
->thread_siblings
[i
] = buf
;
775 static void free_cpu_topo(struct cpu_topo
*tp
)
782 for (i
= 0 ; i
< tp
->core_sib
; i
++)
783 free(tp
->core_siblings
[i
]);
785 for (i
= 0 ; i
< tp
->thread_sib
; i
++)
786 free(tp
->thread_siblings
[i
]);
791 static struct cpu_topo
*build_cpu_topology(void)
800 ncpus
= sysconf(_SC_NPROCESSORS_CONF
);
804 nr
= (u32
)(ncpus
& UINT_MAX
);
806 sz
= nr
* sizeof(char *);
808 addr
= calloc(1, sizeof(*tp
) + 2 * sz
);
815 tp
->core_siblings
= addr
;
817 tp
->thread_siblings
= addr
;
819 for (i
= 0; i
< nr
; i
++) {
820 ret
= build_cpu_topo(tp
, i
);
831 static int write_cpu_topology(int fd
, struct perf_header
*h __used
,
832 struct perf_evlist
*evlist __used
)
838 tp
= build_cpu_topology();
842 ret
= do_write(fd
, &tp
->core_sib
, sizeof(tp
->core_sib
));
846 for (i
= 0; i
< tp
->core_sib
; i
++) {
847 ret
= do_write_string(fd
, tp
->core_siblings
[i
]);
851 ret
= do_write(fd
, &tp
->thread_sib
, sizeof(tp
->thread_sib
));
855 for (i
= 0; i
< tp
->thread_sib
; i
++) {
856 ret
= do_write_string(fd
, tp
->thread_siblings
[i
]);
867 static int write_total_mem(int fd
, struct perf_header
*h __used
,
868 struct perf_evlist
*evlist __used
)
876 fp
= fopen("/proc/meminfo", "r");
880 while (getline(&buf
, &len
, fp
) > 0) {
881 ret
= strncmp(buf
, "MemTotal:", 9);
886 n
= sscanf(buf
, "%*s %"PRIu64
, &mem
);
888 ret
= do_write(fd
, &mem
, sizeof(mem
));
895 static int write_topo_node(int fd
, int node
)
897 char str
[MAXPATHLEN
];
899 char *buf
= NULL
, *p
;
902 u64 mem_total
, mem_free
, mem
;
905 sprintf(str
, "/sys/devices/system/node/node%d/meminfo", node
);
906 fp
= fopen(str
, "r");
910 while (getline(&buf
, &len
, fp
) > 0) {
911 /* skip over invalid lines */
912 if (!strchr(buf
, ':'))
914 if (sscanf(buf
, "%*s %*d %s %"PRIu64
, field
, &mem
) != 2)
916 if (!strcmp(field
, "MemTotal:"))
918 if (!strcmp(field
, "MemFree:"))
924 ret
= do_write(fd
, &mem_total
, sizeof(u64
));
928 ret
= do_write(fd
, &mem_free
, sizeof(u64
));
933 sprintf(str
, "/sys/devices/system/node/node%d/cpulist", node
);
935 fp
= fopen(str
, "r");
939 if (getline(&buf
, &len
, fp
) <= 0)
942 p
= strchr(buf
, '\n');
946 ret
= do_write_string(fd
, buf
);
953 static int write_numa_topology(int fd
, struct perf_header
*h __used
,
954 struct perf_evlist
*evlist __used
)
959 struct cpu_map
*node_map
= NULL
;
964 fp
= fopen("/sys/devices/system/node/online", "r");
968 if (getline(&buf
, &len
, fp
) <= 0)
971 c
= strchr(buf
, '\n');
975 node_map
= cpu_map__new(buf
);
979 nr
= (u32
)node_map
->nr
;
981 ret
= do_write(fd
, &nr
, sizeof(nr
));
985 for (i
= 0; i
< nr
; i
++) {
986 j
= (u32
)node_map
->map
[i
];
987 ret
= do_write(fd
, &j
, sizeof(j
));
991 ret
= write_topo_node(fd
, i
);
1003 * default get_cpuid(): nothing gets recorded
1004 * actual implementation must be in arch/$(ARCH)/util/header.c
1006 int __attribute__((weak
)) get_cpuid(char *buffer __used
, size_t sz __used
)
1011 static int write_cpuid(int fd
, struct perf_header
*h __used
,
1012 struct perf_evlist
*evlist __used
)
1017 ret
= get_cpuid(buffer
, sizeof(buffer
));
1023 return do_write_string(fd
, buffer
);
1026 static int write_branch_stack(int fd __used
, struct perf_header
*h __used
,
1027 struct perf_evlist
*evlist __used
)
1032 static void print_hostname(struct perf_header
*ph
, int fd
, FILE *fp
)
1034 char *str
= do_read_string(fd
, ph
);
1035 fprintf(fp
, "# hostname : %s\n", str
);
1039 static void print_osrelease(struct perf_header
*ph
, int fd
, FILE *fp
)
1041 char *str
= do_read_string(fd
, ph
);
1042 fprintf(fp
, "# os release : %s\n", str
);
1046 static void print_arch(struct perf_header
*ph
, int fd
, FILE *fp
)
1048 char *str
= do_read_string(fd
, ph
);
1049 fprintf(fp
, "# arch : %s\n", str
);
1053 static void print_cpudesc(struct perf_header
*ph
, int fd
, FILE *fp
)
1055 char *str
= do_read_string(fd
, ph
);
1056 fprintf(fp
, "# cpudesc : %s\n", str
);
1060 static void print_nrcpus(struct perf_header
*ph
, int fd
, FILE *fp
)
1065 ret
= read(fd
, &nr
, sizeof(nr
));
1066 if (ret
!= (ssize_t
)sizeof(nr
))
1067 nr
= -1; /* interpreted as error */
1072 fprintf(fp
, "# nrcpus online : %u\n", nr
);
1074 ret
= read(fd
, &nr
, sizeof(nr
));
1075 if (ret
!= (ssize_t
)sizeof(nr
))
1076 nr
= -1; /* interpreted as error */
1081 fprintf(fp
, "# nrcpus avail : %u\n", nr
);
1084 static void print_version(struct perf_header
*ph
, int fd
, FILE *fp
)
1086 char *str
= do_read_string(fd
, ph
);
1087 fprintf(fp
, "# perf version : %s\n", str
);
1091 static void print_cmdline(struct perf_header
*ph
, int fd
, FILE *fp
)
1097 ret
= read(fd
, &nr
, sizeof(nr
));
1098 if (ret
!= (ssize_t
)sizeof(nr
))
1104 fprintf(fp
, "# cmdline : ");
1106 for (i
= 0; i
< nr
; i
++) {
1107 str
= do_read_string(fd
, ph
);
1108 fprintf(fp
, "%s ", str
);
1114 static void print_cpu_topology(struct perf_header
*ph
, int fd
, FILE *fp
)
1120 ret
= read(fd
, &nr
, sizeof(nr
));
1121 if (ret
!= (ssize_t
)sizeof(nr
))
1127 for (i
= 0; i
< nr
; i
++) {
1128 str
= do_read_string(fd
, ph
);
1129 fprintf(fp
, "# sibling cores : %s\n", str
);
1133 ret
= read(fd
, &nr
, sizeof(nr
));
1134 if (ret
!= (ssize_t
)sizeof(nr
))
1140 for (i
= 0; i
< nr
; i
++) {
1141 str
= do_read_string(fd
, ph
);
1142 fprintf(fp
, "# sibling threads : %s\n", str
);
1147 static void print_event_desc(struct perf_header
*ph
, int fd
, FILE *fp
)
1149 struct perf_event_attr attr
;
1153 u32 nre
, sz
, nr
, i
, j
;
1157 /* number of events */
1158 ret
= read(fd
, &nre
, sizeof(nre
));
1159 if (ret
!= (ssize_t
)sizeof(nre
))
1163 nre
= bswap_32(nre
);
1165 ret
= read(fd
, &sz
, sizeof(sz
));
1166 if (ret
!= (ssize_t
)sizeof(sz
))
1172 memset(&attr
, 0, sizeof(attr
));
1174 /* buffer to hold on file attr struct */
1183 for (i
= 0 ; i
< nre
; i
++) {
1186 * must read entire on-file attr struct to
1187 * sync up with layout.
1189 ret
= read(fd
, buf
, sz
);
1190 if (ret
!= (ssize_t
)sz
)
1194 perf_event__attr_swap(buf
);
1196 memcpy(&attr
, buf
, msz
);
1198 ret
= read(fd
, &nr
, sizeof(nr
));
1199 if (ret
!= (ssize_t
)sizeof(nr
))
1205 str
= do_read_string(fd
, ph
);
1206 fprintf(fp
, "# event : name = %s, ", str
);
1209 fprintf(fp
, "type = %d, config = 0x%"PRIx64
1210 ", config1 = 0x%"PRIx64
", config2 = 0x%"PRIx64
,
1216 fprintf(fp
, ", excl_usr = %d, excl_kern = %d",
1218 attr
.exclude_kernel
);
1221 fprintf(fp
, ", id = {");
1223 for (j
= 0 ; j
< nr
; j
++) {
1224 ret
= read(fd
, &id
, sizeof(id
));
1225 if (ret
!= (ssize_t
)sizeof(id
))
1234 fprintf(fp
, " %"PRIu64
, id
);
1243 fprintf(fp
, "# event desc: not available or unable to read\n");
1246 static void print_total_mem(struct perf_header
*h __used
, int fd
, FILE *fp
)
1251 ret
= read(fd
, &mem
, sizeof(mem
));
1252 if (ret
!= sizeof(mem
))
1256 mem
= bswap_64(mem
);
1258 fprintf(fp
, "# total memory : %"PRIu64
" kB\n", mem
);
1261 fprintf(fp
, "# total memory : unknown\n");
1264 static void print_numa_topology(struct perf_header
*h __used
, int fd
, FILE *fp
)
1269 uint64_t mem_total
, mem_free
;
1272 ret
= read(fd
, &nr
, sizeof(nr
));
1273 if (ret
!= (ssize_t
)sizeof(nr
))
1279 for (i
= 0; i
< nr
; i
++) {
1282 ret
= read(fd
, &c
, sizeof(c
));
1283 if (ret
!= (ssize_t
)sizeof(c
))
1289 ret
= read(fd
, &mem_total
, sizeof(u64
));
1290 if (ret
!= sizeof(u64
))
1293 ret
= read(fd
, &mem_free
, sizeof(u64
));
1294 if (ret
!= sizeof(u64
))
1297 if (h
->needs_swap
) {
1298 mem_total
= bswap_64(mem_total
);
1299 mem_free
= bswap_64(mem_free
);
1302 fprintf(fp
, "# node%u meminfo : total = %"PRIu64
" kB,"
1303 " free = %"PRIu64
" kB\n",
1308 str
= do_read_string(fd
, h
);
1309 fprintf(fp
, "# node%u cpu list : %s\n", c
, str
);
1314 fprintf(fp
, "# numa topology : not available\n");
1317 static void print_cpuid(struct perf_header
*ph
, int fd
, FILE *fp
)
1319 char *str
= do_read_string(fd
, ph
);
1320 fprintf(fp
, "# cpuid : %s\n", str
);
1324 static void print_branch_stack(struct perf_header
*ph __used
, int fd __used
,
1327 fprintf(fp
, "# contains samples with branch stack\n");
1330 static int __event_process_build_id(struct build_id_event
*bev
,
1332 struct perf_session
*session
)
1335 struct list_head
*head
;
1336 struct machine
*machine
;
1339 enum dso_kernel_type dso_type
;
1341 machine
= perf_session__findnew_machine(session
, bev
->pid
);
1345 misc
= bev
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
1348 case PERF_RECORD_MISC_KERNEL
:
1349 dso_type
= DSO_TYPE_KERNEL
;
1350 head
= &machine
->kernel_dsos
;
1352 case PERF_RECORD_MISC_GUEST_KERNEL
:
1353 dso_type
= DSO_TYPE_GUEST_KERNEL
;
1354 head
= &machine
->kernel_dsos
;
1356 case PERF_RECORD_MISC_USER
:
1357 case PERF_RECORD_MISC_GUEST_USER
:
1358 dso_type
= DSO_TYPE_USER
;
1359 head
= &machine
->user_dsos
;
1365 dso
= __dsos__findnew(head
, filename
);
1367 char sbuild_id
[BUILD_ID_SIZE
* 2 + 1];
1369 dso__set_build_id(dso
, &bev
->build_id
);
1371 if (filename
[0] == '[')
1372 dso
->kernel
= dso_type
;
1374 build_id__sprintf(dso
->build_id
, sizeof(dso
->build_id
),
1376 pr_debug("build id event received for %s: %s\n",
1377 dso
->long_name
, sbuild_id
);
1385 static int perf_header__read_build_ids_abi_quirk(struct perf_header
*header
,
1386 int input
, u64 offset
, u64 size
)
1388 struct perf_session
*session
= container_of(header
, struct perf_session
, header
);
1390 struct perf_event_header header
;
1391 u8 build_id
[ALIGN(BUILD_ID_SIZE
, sizeof(u64
))];
1394 struct build_id_event bev
;
1395 char filename
[PATH_MAX
];
1396 u64 limit
= offset
+ size
;
1398 while (offset
< limit
) {
1401 if (read(input
, &old_bev
, sizeof(old_bev
)) != sizeof(old_bev
))
1404 if (header
->needs_swap
)
1405 perf_event_header__bswap(&old_bev
.header
);
1407 len
= old_bev
.header
.size
- sizeof(old_bev
);
1408 if (read(input
, filename
, len
) != len
)
1411 bev
.header
= old_bev
.header
;
1414 * As the pid is the missing value, we need to fill
1415 * it properly. The header.misc value give us nice hint.
1417 bev
.pid
= HOST_KERNEL_ID
;
1418 if (bev
.header
.misc
== PERF_RECORD_MISC_GUEST_USER
||
1419 bev
.header
.misc
== PERF_RECORD_MISC_GUEST_KERNEL
)
1420 bev
.pid
= DEFAULT_GUEST_KERNEL_ID
;
1422 memcpy(bev
.build_id
, old_bev
.build_id
, sizeof(bev
.build_id
));
1423 __event_process_build_id(&bev
, filename
, session
);
1425 offset
+= bev
.header
.size
;
1431 static int perf_header__read_build_ids(struct perf_header
*header
,
1432 int input
, u64 offset
, u64 size
)
1434 struct perf_session
*session
= container_of(header
, struct perf_session
, header
);
1435 struct build_id_event bev
;
1436 char filename
[PATH_MAX
];
1437 u64 limit
= offset
+ size
, orig_offset
= offset
;
1440 while (offset
< limit
) {
1443 if (read(input
, &bev
, sizeof(bev
)) != sizeof(bev
))
1446 if (header
->needs_swap
)
1447 perf_event_header__bswap(&bev
.header
);
1449 len
= bev
.header
.size
- sizeof(bev
);
1450 if (read(input
, filename
, len
) != len
)
1453 * The a1645ce1 changeset:
1455 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1457 * Added a field to struct build_id_event that broke the file
1460 * Since the kernel build-id is the first entry, process the
1461 * table using the old format if the well known
1462 * '[kernel.kallsyms]' string for the kernel build-id has the
1463 * first 4 characters chopped off (where the pid_t sits).
1465 if (memcmp(filename
, "nel.kallsyms]", 13) == 0) {
1466 if (lseek(input
, orig_offset
, SEEK_SET
) == (off_t
)-1)
1468 return perf_header__read_build_ids_abi_quirk(header
, input
, offset
, size
);
1471 __event_process_build_id(&bev
, filename
, session
);
1473 offset
+= bev
.header
.size
;
1480 static int process_trace_info(struct perf_file_section
*section __unused
,
1481 struct perf_header
*ph __unused
,
1482 int feat __unused
, int fd
)
1484 trace_report(fd
, false);
1488 static int process_build_id(struct perf_file_section
*section
,
1489 struct perf_header
*ph
,
1490 int feat __unused
, int fd
)
1492 if (perf_header__read_build_ids(ph
, fd
, section
->offset
, section
->size
))
1493 pr_debug("Failed to read buildids, continuing...\n");
1497 struct feature_ops
{
1498 int (*write
)(int fd
, struct perf_header
*h
, struct perf_evlist
*evlist
);
1499 void (*print
)(struct perf_header
*h
, int fd
, FILE *fp
);
1500 int (*process
)(struct perf_file_section
*section
,
1501 struct perf_header
*h
, int feat
, int fd
);
1506 #define FEAT_OPA(n, func) \
1507 [n] = { .name = #n, .write = write_##func, .print = print_##func }
1508 #define FEAT_OPP(n, func) \
1509 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
1510 .process = process_##func }
1511 #define FEAT_OPF(n, func) \
1512 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
1515 /* feature_ops not implemented: */
1516 #define print_trace_info NULL
1517 #define print_build_id NULL
1519 static const struct feature_ops feat_ops
[HEADER_LAST_FEATURE
] = {
1520 FEAT_OPP(HEADER_TRACE_INFO
, trace_info
),
1521 FEAT_OPP(HEADER_BUILD_ID
, build_id
),
1522 FEAT_OPA(HEADER_HOSTNAME
, hostname
),
1523 FEAT_OPA(HEADER_OSRELEASE
, osrelease
),
1524 FEAT_OPA(HEADER_VERSION
, version
),
1525 FEAT_OPA(HEADER_ARCH
, arch
),
1526 FEAT_OPA(HEADER_NRCPUS
, nrcpus
),
1527 FEAT_OPA(HEADER_CPUDESC
, cpudesc
),
1528 FEAT_OPA(HEADER_CPUID
, cpuid
),
1529 FEAT_OPA(HEADER_TOTAL_MEM
, total_mem
),
1530 FEAT_OPA(HEADER_EVENT_DESC
, event_desc
),
1531 FEAT_OPA(HEADER_CMDLINE
, cmdline
),
1532 FEAT_OPF(HEADER_CPU_TOPOLOGY
, cpu_topology
),
1533 FEAT_OPF(HEADER_NUMA_TOPOLOGY
, numa_topology
),
1534 FEAT_OPA(HEADER_BRANCH_STACK
, branch_stack
),
1537 struct header_print_data
{
1539 bool full
; /* extended list of headers */
1542 static int perf_file_section__fprintf_info(struct perf_file_section
*section
,
1543 struct perf_header
*ph
,
1544 int feat
, int fd
, void *data
)
1546 struct header_print_data
*hd
= data
;
1548 if (lseek(fd
, section
->offset
, SEEK_SET
) == (off_t
)-1) {
1549 pr_debug("Failed to lseek to %" PRIu64
" offset for feature "
1550 "%d, continuing...\n", section
->offset
, feat
);
1553 if (feat
>= HEADER_LAST_FEATURE
) {
1554 pr_warning("unknown feature %d\n", feat
);
1557 if (!feat_ops
[feat
].print
)
1560 if (!feat_ops
[feat
].full_only
|| hd
->full
)
1561 feat_ops
[feat
].print(ph
, fd
, hd
->fp
);
1563 fprintf(hd
->fp
, "# %s info available, use -I to display\n",
1564 feat_ops
[feat
].name
);
1569 int perf_header__fprintf_info(struct perf_session
*session
, FILE *fp
, bool full
)
1571 struct header_print_data hd
;
1572 struct perf_header
*header
= &session
->header
;
1573 int fd
= session
->fd
;
1577 perf_header__process_sections(header
, fd
, &hd
,
1578 perf_file_section__fprintf_info
);
1582 static int do_write_feat(int fd
, struct perf_header
*h
, int type
,
1583 struct perf_file_section
**p
,
1584 struct perf_evlist
*evlist
)
1589 if (perf_header__has_feat(h
, type
)) {
1590 if (!feat_ops
[type
].write
)
1593 (*p
)->offset
= lseek(fd
, 0, SEEK_CUR
);
1595 err
= feat_ops
[type
].write(fd
, h
, evlist
);
1597 pr_debug("failed to write feature %d\n", type
);
1599 /* undo anything written */
1600 lseek(fd
, (*p
)->offset
, SEEK_SET
);
1604 (*p
)->size
= lseek(fd
, 0, SEEK_CUR
) - (*p
)->offset
;
1610 static int perf_header__adds_write(struct perf_header
*header
,
1611 struct perf_evlist
*evlist
, int fd
)
1614 struct perf_file_section
*feat_sec
, *p
;
1620 nr_sections
= bitmap_weight(header
->adds_features
, HEADER_FEAT_BITS
);
1624 feat_sec
= p
= calloc(sizeof(*feat_sec
), nr_sections
);
1625 if (feat_sec
== NULL
)
1628 sec_size
= sizeof(*feat_sec
) * nr_sections
;
1630 sec_start
= header
->data_offset
+ header
->data_size
;
1631 lseek(fd
, sec_start
+ sec_size
, SEEK_SET
);
1633 for_each_set_bit(feat
, header
->adds_features
, HEADER_FEAT_BITS
) {
1634 if (do_write_feat(fd
, header
, feat
, &p
, evlist
))
1635 perf_header__clear_feat(header
, feat
);
1638 lseek(fd
, sec_start
, SEEK_SET
);
1640 * may write more than needed due to dropped feature, but
1641 * this is okay, reader will skip the mising entries
1643 err
= do_write(fd
, feat_sec
, sec_size
);
1645 pr_debug("failed to write feature section\n");
1650 int perf_header__write_pipe(int fd
)
1652 struct perf_pipe_file_header f_header
;
1655 f_header
= (struct perf_pipe_file_header
){
1656 .magic
= PERF_MAGIC
,
1657 .size
= sizeof(f_header
),
1660 err
= do_write(fd
, &f_header
, sizeof(f_header
));
1662 pr_debug("failed to write perf pipe header\n");
1669 int perf_session__write_header(struct perf_session
*session
,
1670 struct perf_evlist
*evlist
,
1671 int fd
, bool at_exit
)
1673 struct perf_file_header f_header
;
1674 struct perf_file_attr f_attr
;
1675 struct perf_header
*header
= &session
->header
;
1676 struct perf_evsel
*attr
, *pair
= NULL
;
1679 lseek(fd
, sizeof(f_header
), SEEK_SET
);
1681 if (session
->evlist
!= evlist
)
1682 pair
= list_entry(session
->evlist
->entries
.next
, struct perf_evsel
, node
);
1684 list_for_each_entry(attr
, &evlist
->entries
, node
) {
1685 attr
->id_offset
= lseek(fd
, 0, SEEK_CUR
);
1686 err
= do_write(fd
, attr
->id
, attr
->ids
* sizeof(u64
));
1689 pr_debug("failed to write perf header\n");
1692 if (session
->evlist
!= evlist
) {
1693 err
= do_write(fd
, pair
->id
, pair
->ids
* sizeof(u64
));
1696 attr
->ids
+= pair
->ids
;
1697 pair
= list_entry(pair
->node
.next
, struct perf_evsel
, node
);
1701 header
->attr_offset
= lseek(fd
, 0, SEEK_CUR
);
1703 list_for_each_entry(attr
, &evlist
->entries
, node
) {
1704 f_attr
= (struct perf_file_attr
){
1707 .offset
= attr
->id_offset
,
1708 .size
= attr
->ids
* sizeof(u64
),
1711 err
= do_write(fd
, &f_attr
, sizeof(f_attr
));
1713 pr_debug("failed to write perf header attribute\n");
1718 header
->event_offset
= lseek(fd
, 0, SEEK_CUR
);
1719 header
->event_size
= event_count
* sizeof(struct perf_trace_event_type
);
1721 err
= do_write(fd
, events
, header
->event_size
);
1723 pr_debug("failed to write perf header events\n");
1728 header
->data_offset
= lseek(fd
, 0, SEEK_CUR
);
1731 err
= perf_header__adds_write(header
, evlist
, fd
);
1736 f_header
= (struct perf_file_header
){
1737 .magic
= PERF_MAGIC
,
1738 .size
= sizeof(f_header
),
1739 .attr_size
= sizeof(f_attr
),
1741 .offset
= header
->attr_offset
,
1742 .size
= evlist
->nr_entries
* sizeof(f_attr
),
1745 .offset
= header
->data_offset
,
1746 .size
= header
->data_size
,
1749 .offset
= header
->event_offset
,
1750 .size
= header
->event_size
,
1754 memcpy(&f_header
.adds_features
, &header
->adds_features
, sizeof(header
->adds_features
));
1756 lseek(fd
, 0, SEEK_SET
);
1757 err
= do_write(fd
, &f_header
, sizeof(f_header
));
1759 pr_debug("failed to write perf header\n");
1762 lseek(fd
, header
->data_offset
+ header
->data_size
, SEEK_SET
);
1768 static int perf_header__getbuffer64(struct perf_header
*header
,
1769 int fd
, void *buf
, size_t size
)
1771 if (readn(fd
, buf
, size
) <= 0)
1774 if (header
->needs_swap
)
1775 mem_bswap_64(buf
, size
);
1780 int perf_header__process_sections(struct perf_header
*header
, int fd
,
1782 int (*process
)(struct perf_file_section
*section
,
1783 struct perf_header
*ph
,
1784 int feat
, int fd
, void *data
))
1786 struct perf_file_section
*feat_sec
, *sec
;
1792 nr_sections
= bitmap_weight(header
->adds_features
, HEADER_FEAT_BITS
);
1796 feat_sec
= sec
= calloc(sizeof(*feat_sec
), nr_sections
);
1800 sec_size
= sizeof(*feat_sec
) * nr_sections
;
1802 lseek(fd
, header
->data_offset
+ header
->data_size
, SEEK_SET
);
1804 err
= perf_header__getbuffer64(header
, fd
, feat_sec
, sec_size
);
1808 for_each_set_bit(feat
, header
->adds_features
, HEADER_LAST_FEATURE
) {
1809 err
= process(sec
++, header
, feat
, fd
, data
);
1819 static const int attr_file_abi_sizes
[] = {
1820 [0] = PERF_ATTR_SIZE_VER0
,
1821 [1] = PERF_ATTR_SIZE_VER1
,
1826 * In the legacy file format, the magic number is not used to encode endianness.
1827 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
1828 * on ABI revisions, we need to try all combinations for all endianness to
1829 * detect the endianness.
1831 static int try_all_file_abis(uint64_t hdr_sz
, struct perf_header
*ph
)
1833 uint64_t ref_size
, attr_size
;
1836 for (i
= 0 ; attr_file_abi_sizes
[i
]; i
++) {
1837 ref_size
= attr_file_abi_sizes
[i
]
1838 + sizeof(struct perf_file_section
);
1839 if (hdr_sz
!= ref_size
) {
1840 attr_size
= bswap_64(hdr_sz
);
1841 if (attr_size
!= ref_size
)
1844 ph
->needs_swap
= true;
1846 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
1851 /* could not determine endianness */
#define PERF_PIPE_HDR_VER0	16

/*
 * Known pipe-mode header sizes, one per ABI revision; zero-terminated so
 * try_all_pipe_abis() can iterate to the sentinel.
 */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
1863 * In the legacy pipe format, there is an implicit assumption that endiannesss
1864 * between host recording the samples, and host parsing the samples is the
1865 * same. This is not always the case given that the pipe output may always be
1866 * redirected into a file and analyzed on a different machine with possibly a
1867 * different endianness and perf_event ABI revsions in the perf tool itself.
1869 static int try_all_pipe_abis(uint64_t hdr_sz
, struct perf_header
*ph
)
1874 for (i
= 0 ; attr_pipe_abi_sizes
[i
]; i
++) {
1875 if (hdr_sz
!= attr_pipe_abi_sizes
[i
]) {
1876 attr_size
= bswap_64(hdr_sz
);
1877 if (attr_size
!= hdr_sz
)
1880 ph
->needs_swap
= true;
1882 pr_debug("Pipe ABI%d perf.data file detected\n", i
);
1888 static int check_magic_endian(u64 magic
, uint64_t hdr_sz
,
1889 bool is_pipe
, struct perf_header
*ph
)
1893 /* check for legacy format */
1894 ret
= memcmp(&magic
, __perf_magic1
, sizeof(magic
));
1896 pr_debug("legacy perf.data format\n");
1898 return try_all_pipe_abis(hdr_sz
, ph
);
1900 return try_all_file_abis(hdr_sz
, ph
);
1903 * the new magic number serves two purposes:
1904 * - unique number to identify actual perf.data files
1905 * - encode endianness of file
1908 /* check magic number with one endianness */
1909 if (magic
== __perf_magic2
)
1912 /* check magic number with opposite endianness */
1913 if (magic
!= __perf_magic2_sw
)
1916 ph
->needs_swap
= true;
1921 int perf_file_header__read(struct perf_file_header
*header
,
1922 struct perf_header
*ph
, int fd
)
1926 lseek(fd
, 0, SEEK_SET
);
1928 ret
= readn(fd
, header
, sizeof(*header
));
1932 if (check_magic_endian(header
->magic
,
1933 header
->attr_size
, false, ph
) < 0) {
1934 pr_debug("magic/endian check failed\n");
1938 if (ph
->needs_swap
) {
1939 mem_bswap_64(header
, offsetof(struct perf_file_header
,
1943 if (header
->size
!= sizeof(*header
)) {
1944 /* Support the previous format */
1945 if (header
->size
== offsetof(typeof(*header
), adds_features
))
1946 bitmap_zero(header
->adds_features
, HEADER_FEAT_BITS
);
1949 } else if (ph
->needs_swap
) {
1952 * feature bitmap is declared as an array of unsigned longs --
1953 * not good since its size can differ between the host that
1954 * generated the data file and the host analyzing the file.
1956 * We need to handle endianness, but we don't know the size of
1957 * the unsigned long where the file was generated. Take a best
1958 * guess at determining it: try 64-bit swap first (ie., file
1959 * created on a 64-bit host), and check if the hostname feature
1960 * bit is set (this feature bit is forced on as of fbe96f2).
1961 * If the bit is not, undo the 64-bit swap and try a 32-bit
1962 * swap. If the hostname bit is still not set (e.g., older data
1963 * file), punt and fallback to the original behavior --
1964 * clearing all feature bits and setting buildid.
1966 for (i
= 0; i
< BITS_TO_LONGS(HEADER_FEAT_BITS
); ++i
)
1967 header
->adds_features
[i
] = bswap_64(header
->adds_features
[i
]);
1969 if (!test_bit(HEADER_HOSTNAME
, header
->adds_features
)) {
1970 for (i
= 0; i
< BITS_TO_LONGS(HEADER_FEAT_BITS
); ++i
) {
1971 header
->adds_features
[i
] = bswap_64(header
->adds_features
[i
]);
1972 header
->adds_features
[i
] = bswap_32(header
->adds_features
[i
]);
1976 if (!test_bit(HEADER_HOSTNAME
, header
->adds_features
)) {
1977 bitmap_zero(header
->adds_features
, HEADER_FEAT_BITS
);
1978 set_bit(HEADER_BUILD_ID
, header
->adds_features
);
1982 memcpy(&ph
->adds_features
, &header
->adds_features
,
1983 sizeof(ph
->adds_features
));
1985 ph
->event_offset
= header
->event_types
.offset
;
1986 ph
->event_size
= header
->event_types
.size
;
1987 ph
->data_offset
= header
->data
.offset
;
1988 ph
->data_size
= header
->data
.size
;
1992 static int perf_file_section__process(struct perf_file_section
*section
,
1993 struct perf_header
*ph
,
1994 int feat
, int fd
, void *data __used
)
1996 if (lseek(fd
, section
->offset
, SEEK_SET
) == (off_t
)-1) {
1997 pr_debug("Failed to lseek to %" PRIu64
" offset for feature "
1998 "%d, continuing...\n", section
->offset
, feat
);
2002 if (feat
>= HEADER_LAST_FEATURE
) {
2003 pr_debug("unknown feature %d, continuing...\n", feat
);
2007 if (!feat_ops
[feat
].process
)
2010 return feat_ops
[feat
].process(section
, ph
, feat
, fd
);
2013 static int perf_file_header__read_pipe(struct perf_pipe_file_header
*header
,
2014 struct perf_header
*ph
, int fd
,
2019 ret
= readn(fd
, header
, sizeof(*header
));
2023 if (check_magic_endian(header
->magic
, header
->size
, true, ph
) < 0) {
2024 pr_debug("endian/magic failed\n");
2029 header
->size
= bswap_64(header
->size
);
2031 if (repipe
&& do_write(STDOUT_FILENO
, header
, sizeof(*header
)) < 0)
2037 static int perf_header__read_pipe(struct perf_session
*session
, int fd
)
2039 struct perf_header
*header
= &session
->header
;
2040 struct perf_pipe_file_header f_header
;
2042 if (perf_file_header__read_pipe(&f_header
, header
, fd
,
2043 session
->repipe
) < 0) {
2044 pr_debug("incompatible file format\n");
2053 static int read_attr(int fd
, struct perf_header
*ph
,
2054 struct perf_file_attr
*f_attr
)
2056 struct perf_event_attr
*attr
= &f_attr
->attr
;
2058 size_t our_sz
= sizeof(f_attr
->attr
);
2061 memset(f_attr
, 0, sizeof(*f_attr
));
2063 /* read minimal guaranteed structure */
2064 ret
= readn(fd
, attr
, PERF_ATTR_SIZE_VER0
);
2066 pr_debug("cannot read %d bytes of header attr\n",
2067 PERF_ATTR_SIZE_VER0
);
2071 /* on file perf_event_attr size */
2079 sz
= PERF_ATTR_SIZE_VER0
;
2080 } else if (sz
> our_sz
) {
2081 pr_debug("file uses a more recent and unsupported ABI"
2082 " (%zu bytes extra)\n", sz
- our_sz
);
2085 /* what we have not yet read and that we know about */
2086 left
= sz
- PERF_ATTR_SIZE_VER0
;
2089 ptr
+= PERF_ATTR_SIZE_VER0
;
2091 ret
= readn(fd
, ptr
, left
);
2093 /* read perf_file_section, ids are read in caller */
2094 ret
= readn(fd
, &f_attr
->ids
, sizeof(f_attr
->ids
));
2096 return ret
<= 0 ? -1 : 0;
2099 int perf_session__read_header(struct perf_session
*session
, int fd
)
2101 struct perf_header
*header
= &session
->header
;
2102 struct perf_file_header f_header
;
2103 struct perf_file_attr f_attr
;
2105 int nr_attrs
, nr_ids
, i
, j
;
2107 session
->evlist
= perf_evlist__new(NULL
, NULL
);
2108 if (session
->evlist
== NULL
)
2111 if (session
->fd_pipe
)
2112 return perf_header__read_pipe(session
, fd
);
2114 if (perf_file_header__read(&f_header
, header
, fd
) < 0)
2117 nr_attrs
= f_header
.attrs
.size
/ f_header
.attr_size
;
2118 lseek(fd
, f_header
.attrs
.offset
, SEEK_SET
);
2120 for (i
= 0; i
< nr_attrs
; i
++) {
2121 struct perf_evsel
*evsel
;
2124 if (read_attr(fd
, header
, &f_attr
) < 0)
2127 if (header
->needs_swap
)
2128 perf_event__attr_swap(&f_attr
.attr
);
2130 tmp
= lseek(fd
, 0, SEEK_CUR
);
2131 evsel
= perf_evsel__new(&f_attr
.attr
, i
);
2134 goto out_delete_evlist
;
2136 * Do it before so that if perf_evsel__alloc_id fails, this
2137 * entry gets purged too at perf_evlist__delete().
2139 perf_evlist__add(session
->evlist
, evsel
);
2141 nr_ids
= f_attr
.ids
.size
/ sizeof(u64
);
2143 * We don't have the cpu and thread maps on the header, so
2144 * for allocating the perf_sample_id table we fake 1 cpu and
2145 * hattr->ids threads.
2147 if (perf_evsel__alloc_id(evsel
, 1, nr_ids
))
2148 goto out_delete_evlist
;
2150 lseek(fd
, f_attr
.ids
.offset
, SEEK_SET
);
2152 for (j
= 0; j
< nr_ids
; j
++) {
2153 if (perf_header__getbuffer64(header
, fd
, &f_id
, sizeof(f_id
)))
2156 perf_evlist__id_add(session
->evlist
, evsel
, 0, j
, f_id
);
2159 lseek(fd
, tmp
, SEEK_SET
);
2162 symbol_conf
.nr_events
= nr_attrs
;
2164 if (f_header
.event_types
.size
) {
2165 lseek(fd
, f_header
.event_types
.offset
, SEEK_SET
);
2166 events
= malloc(f_header
.event_types
.size
);
2169 if (perf_header__getbuffer64(header
, fd
, events
,
2170 f_header
.event_types
.size
))
2172 event_count
= f_header
.event_types
.size
/ sizeof(struct perf_trace_event_type
);
2175 perf_header__process_sections(header
, fd
, NULL
,
2176 perf_file_section__process
);
2178 lseek(fd
, header
->data_offset
, SEEK_SET
);
2186 perf_evlist__delete(session
->evlist
);
2187 session
->evlist
= NULL
;
2191 int perf_event__synthesize_attr(struct perf_tool
*tool
,
2192 struct perf_event_attr
*attr
, u16 ids
, u64
*id
,
2193 perf_event__handler_t process
)
2195 union perf_event
*ev
;
2199 size
= sizeof(struct perf_event_attr
);
2200 size
= ALIGN(size
, sizeof(u64
));
2201 size
+= sizeof(struct perf_event_header
);
2202 size
+= ids
* sizeof(u64
);
2209 ev
->attr
.attr
= *attr
;
2210 memcpy(ev
->attr
.id
, id
, ids
* sizeof(u64
));
2212 ev
->attr
.header
.type
= PERF_RECORD_HEADER_ATTR
;
2213 ev
->attr
.header
.size
= size
;
2215 err
= process(tool
, ev
, NULL
, NULL
);
2222 int perf_event__synthesize_attrs(struct perf_tool
*tool
,
2223 struct perf_session
*session
,
2224 perf_event__handler_t process
)
2226 struct perf_evsel
*attr
;
2229 list_for_each_entry(attr
, &session
->evlist
->entries
, node
) {
2230 err
= perf_event__synthesize_attr(tool
, &attr
->attr
, attr
->ids
,
2233 pr_debug("failed to create perf header attribute\n");
2241 int perf_event__process_attr(union perf_event
*event
,
2242 struct perf_evlist
**pevlist
)
2244 unsigned int i
, ids
, n_ids
;
2245 struct perf_evsel
*evsel
;
2246 struct perf_evlist
*evlist
= *pevlist
;
2248 if (evlist
== NULL
) {
2249 *pevlist
= evlist
= perf_evlist__new(NULL
, NULL
);
2254 evsel
= perf_evsel__new(&event
->attr
.attr
, evlist
->nr_entries
);
2258 perf_evlist__add(evlist
, evsel
);
2260 ids
= event
->header
.size
;
2261 ids
-= (void *)&event
->attr
.id
- (void *)event
;
2262 n_ids
= ids
/ sizeof(u64
);
2264 * We don't have the cpu and thread maps on the header, so
2265 * for allocating the perf_sample_id table we fake 1 cpu and
2266 * hattr->ids threads.
2268 if (perf_evsel__alloc_id(evsel
, 1, n_ids
))
2271 for (i
= 0; i
< n_ids
; i
++) {
2272 perf_evlist__id_add(evlist
, evsel
, 0, i
, event
->attr
.id
[i
]);
2278 int perf_event__synthesize_event_type(struct perf_tool
*tool
,
2279 u64 event_id
, char *name
,
2280 perf_event__handler_t process
,
2281 struct machine
*machine
)
2283 union perf_event ev
;
2287 memset(&ev
, 0, sizeof(ev
));
2289 ev
.event_type
.event_type
.event_id
= event_id
;
2290 memset(ev
.event_type
.event_type
.name
, 0, MAX_EVENT_NAME
);
2291 strncpy(ev
.event_type
.event_type
.name
, name
, MAX_EVENT_NAME
- 1);
2293 ev
.event_type
.header
.type
= PERF_RECORD_HEADER_EVENT_TYPE
;
2294 size
= strlen(ev
.event_type
.event_type
.name
);
2295 size
= ALIGN(size
, sizeof(u64
));
2296 ev
.event_type
.header
.size
= sizeof(ev
.event_type
) -
2297 (sizeof(ev
.event_type
.event_type
.name
) - size
);
2299 err
= process(tool
, &ev
, NULL
, machine
);
2304 int perf_event__synthesize_event_types(struct perf_tool
*tool
,
2305 perf_event__handler_t process
,
2306 struct machine
*machine
)
2308 struct perf_trace_event_type
*type
;
2311 for (i
= 0; i
< event_count
; i
++) {
2314 err
= perf_event__synthesize_event_type(tool
, type
->event_id
,
2315 type
->name
, process
,
2318 pr_debug("failed to create perf header event type\n");
2326 int perf_event__process_event_type(struct perf_tool
*tool __unused
,
2327 union perf_event
*event
)
2329 if (perf_header__push_event(event
->event_type
.event_type
.event_id
,
2330 event
->event_type
.event_type
.name
) < 0)
2336 int perf_event__synthesize_tracing_data(struct perf_tool
*tool
, int fd
,
2337 struct perf_evlist
*evlist
,
2338 perf_event__handler_t process
)
2340 union perf_event ev
;
2341 struct tracing_data
*tdata
;
2342 ssize_t size
= 0, aligned_size
= 0, padding
;
2346 * We are going to store the size of the data followed
2347 * by the data contents. Since the fd descriptor is a pipe,
2348 * we cannot seek back to store the size of the data once
2349 * we know it. Instead we:
2351 * - write the tracing data to the temp file
2352 * - get/write the data size to pipe
2353 * - write the tracing data from the temp file
2356 tdata
= tracing_data_get(&evlist
->entries
, fd
, true);
2360 memset(&ev
, 0, sizeof(ev
));
2362 ev
.tracing_data
.header
.type
= PERF_RECORD_HEADER_TRACING_DATA
;
2364 aligned_size
= ALIGN(size
, sizeof(u64
));
2365 padding
= aligned_size
- size
;
2366 ev
.tracing_data
.header
.size
= sizeof(ev
.tracing_data
);
2367 ev
.tracing_data
.size
= aligned_size
;
2369 process(tool
, &ev
, NULL
, NULL
);
2372 * The put function will copy all the tracing data
2373 * stored in temp file to the pipe.
2375 tracing_data_put(tdata
);
2377 write_padded(fd
, NULL
, 0, padding
);
2379 return aligned_size
;
2382 int perf_event__process_tracing_data(union perf_event
*event
,
2383 struct perf_session
*session
)
2385 ssize_t size_read
, padding
, size
= event
->tracing_data
.size
;
2386 off_t offset
= lseek(session
->fd
, 0, SEEK_CUR
);
2389 /* setup for reading amidst mmap */
2390 lseek(session
->fd
, offset
+ sizeof(struct tracing_data_event
),
2393 size_read
= trace_report(session
->fd
, session
->repipe
);
2395 padding
= ALIGN(size_read
, sizeof(u64
)) - size_read
;
2397 if (read(session
->fd
, buf
, padding
) < 0)
2398 die("reading input file");
2399 if (session
->repipe
) {
2400 int retw
= write(STDOUT_FILENO
, buf
, padding
);
2401 if (retw
<= 0 || retw
!= padding
)
2402 die("repiping tracing data padding");
2405 if (size_read
+ padding
!= size
)
2406 die("tracing data size mismatch");
2408 return size_read
+ padding
;
2411 int perf_event__synthesize_build_id(struct perf_tool
*tool
,
2412 struct dso
*pos
, u16 misc
,
2413 perf_event__handler_t process
,
2414 struct machine
*machine
)
2416 union perf_event ev
;
2423 memset(&ev
, 0, sizeof(ev
));
2425 len
= pos
->long_name_len
+ 1;
2426 len
= ALIGN(len
, NAME_ALIGN
);
2427 memcpy(&ev
.build_id
.build_id
, pos
->build_id
, sizeof(pos
->build_id
));
2428 ev
.build_id
.header
.type
= PERF_RECORD_HEADER_BUILD_ID
;
2429 ev
.build_id
.header
.misc
= misc
;
2430 ev
.build_id
.pid
= machine
->pid
;
2431 ev
.build_id
.header
.size
= sizeof(ev
.build_id
) + len
;
2432 memcpy(&ev
.build_id
.filename
, pos
->long_name
, pos
->long_name_len
);
2434 err
= process(tool
, &ev
, NULL
, machine
);
2439 int perf_event__process_build_id(struct perf_tool
*tool __used
,
2440 union perf_event
*event
,
2441 struct perf_session
*session
)
2443 __event_process_build_id(&event
->build_id
,
2444 event
->build_id
.filename
,
2449 void disable_buildid_cache(void)
2451 no_buildid_cache
= true;