/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2023 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbsupport/common-defs.h"
#include "linux-btrace.h"
#include "gdbsupport/common-regcache.h"
#include "gdbsupport/gdb_wait.h"
#include "x86-cpuid.h"
#include "gdbsupport/filestuff.h"
#include "gdbsupport/scoped_fd.h"
#include "gdbsupport/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
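
  /* CPUID leaf 0 yields the vendor identification string in EBX, EDX and
     ECX ("GenuineIntel", "AuthenticAMD"); the signature_* constants compared
     against below come from x86-cpuid.h.  */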
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              if (cpu.family == 0xf)
                cpu.family += (cpuid >> 20) & 0xff;

              cpu.model = (cpuid >> 4) & 0xf;
              if ((cpu.family == 0x6) || ((cpu.family & 0xf) == 0xf))
                cpu.model += (cpuid >> 12) & 0xf0;
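
              /* For example, a leaf-1 EAX of 0x000306a9 decodes to family
                 0x6, model 0x3a (Ivy Bridge): (0x306a9 >> 8) & 0xf == 0x6
                 and ((0x306a9 >> 4) & 0xf) + ((0x306a9 >> 12) & 0xf0)
                 == 0x3a.  */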
            }
        }
      else if (ebx == signature_AMD_ebx && ecx == signature_AMD_ecx
               && edx == signature_AMD_edx)
        cpu.vendor = CV_AMD;
    }

  return cpu;
}

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return nullptr;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
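
  /* For example, with BUFFER_SIZE 8, DATA_HEAD 2 and SIZE 5, DATA_HEAD is
     first bumped to 10 so that DATA_TAIL becomes 5; the copy below then
     takes three bytes from the end of the buffer and two from its
     beginning.  */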
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == nullptr)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == nullptr)
        break;
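
      /* A /proc/kallsyms line has the form
           "ffffffff81000000 T _text"
         i.e. a hex address, a symbol type letter and a symbol name; only
         [tT] text symbols are of interest here.  */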
      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
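
/* For example, given two samples s1 = (from 0x100, to 0x200) and
   s2 = (from 0x250, to 0x300) recorded in that order, the backwards
   traversal below produces a block from 0x300 to the current pc, then a
   block from 0x200 to 0x250, and finally an incomplete block ending at
   0x100 whose begin address is filled in or pruned later.  */
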
static std::vector<btrace_block> *
perf_event_read_bts (btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */
          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      btrace->push_back (block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  btrace->push_back (block);

  return btrace;
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);

    case CV_AMD:
      return 0;
    }
}

/* The perf_event_open syscall failed.  Try to print a helpful error
   message.  */

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
        static const char filename[] = "/proc/sys/kernel/perf_event_paranoid";

        gdb_file_up file = gdb_fopen_cloexec (filename, "r");
        if (file.get () == nullptr)
          error (_("Failed to open %s (%s).  Your system does not support "
                   "process recording."), filename, safe_strerror (errno));

        int level, found = fscanf (file.get (), "%d", &level);
        if (found == 1 && level > 2)
          error (_("You do not have permission to record the process.  "
                   "Try setting %s to 2 or less."), filename);
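
        /* Mainline kernels use paranoid levels up to 2; some distributions
           also ship a stricter, out-of-tree level 3 that denies unprivileged
           use of perf_event_open altogether.  */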
      }
      break;
    }

  error (_("Failed to start recording: %s"), safe_strerror (errno));
}

/* Get the linux version of a btrace_target_info.  */

static linux_btrace_target_info *
get_linux_btrace_target_info (btrace_target_info *gtinfo)
{
  return gdb::checked_static_cast<linux_btrace_target_info *> (gtinfo);
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  std::unique_ptr<linux_btrace_target_info> tinfo
    { std::make_unique<linux_btrace_target_info> (ptid) };

  tinfo->conf.format = BTRACE_FORMAT_BTS;

  tinfo->attr.size = sizeof (tinfo->attr);
  tinfo->attr.type = PERF_TYPE_HARDWARE;
  tinfo->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  tinfo->attr.sample_period = 1;

  /* We sample from and to address.  */
  tinfo->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
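
  /* Each PERF_RECORD_SAMPLE thus carries the perf_event header followed by
     two u64 values, ip and then addr, which is the layout struct
     perf_event_sample above relies on.  */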

  tinfo->attr.exclude_kernel = 1;
  tinfo->attr.exclude_hv = 1;
  tinfo->attr.exclude_idle = 1;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
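
  /* For example, PAGES = 5 (0b101) is bumped to 6 at bit 0 and to 8 at
     bit 1; the loop then terminates since 8 == 1 << 3.  */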

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  tinfo->pev.size = size;
  tinfo->pev.data_head = &header->data_head;
  tinfo->pev.mem = (const uint8_t *) data.release () + data_offset;
  tinfo->pev.last_head = 0ull;
  tinfo->header = header;
  tinfo->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.  */

static int
perf_event_pt_event_type ()
{
  static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    switch (errno)
      {
      case EACCES:
      case EFAULT:
      case EPERM:
        error (_("Failed to open %s (%s).  You do not have permission "
                 "to use Intel PT."), filename, safe_strerror (errno));

      case ENOTDIR:
      case ENOENT:
        error (_("Failed to open %s (%s).  Your system does not support "
                 "Intel PT."), filename, safe_strerror (errno));

      default:
        error (_("Failed to open %s: %s."), filename, safe_strerror (errno));
      }

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  size_t pages;
  int pid, pg;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  std::unique_ptr<linux_btrace_target_info> tinfo
    { std::make_unique<linux_btrace_target_info> (ptid) };

  tinfo->conf.format = BTRACE_FORMAT_PT;

  tinfo->attr.size = sizeof (tinfo->attr);
  tinfo->attr.type = perf_event_pt_event_type ();

  tinfo->attr.exclude_kernel = 1;
  tinfo->attr.exclude_hv = 1;
  tinfo->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;
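
  /* Writing AUX_OFFSET above and AUX_SIZE below into the user page tells
     the kernel where the AUX area will be mapped; the aux mmap further
     down must use these exact values.  */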

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  tinfo->pev.size = aux.size ();
  tinfo->pev.mem = (const uint8_t *) aux.release ();
  tinfo->pev.data_head = &header->aux_head;
  tinfo->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (tinfo->header == header);
  tinfo->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) tinfo->pev.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}

/* Disable BTS tracing.  */

static void
linux_disable_bts (struct linux_btrace_target_info *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->pev.size + PAGE_SIZE);
  close (tinfo->file);
}

/* Disable Intel Processor Trace tracing.  */

static void
linux_disable_pt (struct linux_btrace_target_info *tinfo)
{
  munmap ((void *) tinfo->pev.mem, tinfo->pev.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *gtinfo)
{
  linux_btrace_target_info *tinfo
    = get_linux_btrace_target_info (gtinfo);

  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      linux_disable_bts (tinfo);
      delete tinfo;
      return BTRACE_ERR_NONE;

    case BTRACE_FORMAT_PT:
      linux_disable_pt (tinfo);
      delete tinfo;
      return BTRACE_ERR_NONE;
    }

  return BTRACE_ERR_NOT_SUPPORTED;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (btrace_data_bts *btrace, linux_btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head = 0, data_tail;
  unsigned int retries = 5;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (&tinfo->pev))
    return BTRACE_ERR_NONE;

  buffer_size = tinfo->pev.size;
  data_tail = tinfo->pev.last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *tinfo->pev.data_head;

      /* Delete any leftover trace from the previous iteration.  */
      delete btrace->blocks;
      btrace->blocks = nullptr;

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = tinfo->pev.mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + tinfo->pev.size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *tinfo->pev.data_head)
        break;
    }

  tinfo->pev.last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!btrace->blocks->empty () && type != BTRACE_READ_DELTA)
    btrace->blocks->pop_back ();

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (btrace_data_pt *btrace, linux_btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (&tinfo->pev))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (&tinfo->pev, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (_("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *gtinfo,
                   enum btrace_read_type type)
{
  linux_btrace_target_info *tinfo
    = get_linux_btrace_target_info (gtinfo);

  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (_("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return nullptr;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return nullptr;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */