/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2023 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbsupport/common-defs.h"
#include "linux-btrace.h"
#include "gdbsupport/common-regcache.h"
#include "gdbsupport/gdb_wait.h"
#include "x86-cpuid.h"
#include "gdbsupport/filestuff.h"
#include "gdbsupport/scoped_fd.h"
#include "gdbsupport/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              if (cpu.family == 0xf)
                cpu.family += (cpuid >> 20) & 0xff;

              cpu.model = (cpuid >> 4) & 0xf;
              if ((cpu.family == 0x6) || ((cpu.family & 0xf) == 0xf))
                cpu.model += (cpuid >> 12) & 0xf0;
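
              /* Illustrative example: CPUID leaf 1 EAX == 0x000306a9
                 decodes to family 0x6 and model 0xa + 0x30 == 0x3a
                 (Ivy Bridge), matching the 0x3a case in
                 intel_supports_bts below.  */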
            }
        }
      else if (ebx == signature_AMD_ebx && ecx == signature_AMD_ecx
               && edx == signature_AMD_edx)
        cpu.vendor = CV_AMD;
    }

  return cpu;
}

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
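  /* Worked example (illustrative numbers): with BUFFER_SIZE == 8 and
     SIZE == 4, a DATA_HEAD of 2 is first adjusted to 10, giving
     DATA_TAIL == 6; the copy below then takes bytes [6, 8) from the end
     and bytes [0, 2) from the beginning of the buffer.  */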
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;
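
      /* Each line has the form "<address> <type> <symbol>", e.g.
         (illustrative) "ffffffff81000000 T _text".  We look for the
         "_text" symbol of type 't' or 'T', which marks the kernel's
         start address.  */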
      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
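  /* (On x86-64, for example, the kernel lives in the upper canonical
     address half, so bit 63 is set for kernel addresses.)  */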
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
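
/* For example (illustrative addresses), the two consecutive samples
   s1 = { from = 0x400100, to = 0x400200 } and
   s2 = { from = 0x400250, to = 0x400300 }
   yield the block [0x400200; 0x400250], i.e. the instructions executed
   sequentially between the landing of one branch and the take-off of
   the next.  */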

static std::vector<btrace_block> *
perf_event_read_bts (btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;
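
  /* Starting READ at sizeof (sample) - 1 makes the loop below iterate at
     most SIZE / sizeof (sample) times, so a trailing partial record is
     never consumed.  */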

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      btrace->push_back (block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  btrace->push_back (block);

  return btrace;
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);

    case CV_AMD:
      return 0;
    }
}

/* The perf_event_open syscall failed.  Try to print a helpful error
   message.  */

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
        static const char filename[] = "/proc/sys/kernel/perf_event_paranoid";
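
        /* This file holds an integer "paranoia" level; judging by the
           check below, values above 2 forbid recording entirely for
           unprivileged users.  */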
        errno = 0;
        gdb_file_up file = gdb_fopen_cloexec (filename, "r");
        if (file.get () == nullptr)
          error (_("Failed to open %s (%s).  Your system does not support "
                   "process recording."), filename, safe_strerror (errno));

        int level, found = fscanf (file.get (), "%d", &level);
        if (found == 1 && level > 2)
          error (_("You do not have permission to record the process.  "
                   "Try setting %s to 2 or less."), filename);
      }
      break;
    }

  error (_("Failed to start recording: %s"), safe_strerror (errno));
}

/* Get the linux version of a btrace_target_info.  */

static linux_btrace_target_info *
get_linux_btrace_target_info (btrace_target_info *gtinfo)
{
  return gdb::checked_static_cast<linux_btrace_target_info *> (gtinfo);
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  std::unique_ptr<linux_btrace_target_info> tinfo
    { std::make_unique<linux_btrace_target_info> (ptid) };

  tinfo->conf.format = BTRACE_FORMAT_BTS;

  tinfo->attr.size = sizeof (tinfo->attr);
  tinfo->attr.type = PERF_TYPE_HARDWARE;
  tinfo->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  tinfo->attr.sample_period = 1;

  /* We sample from and to address.  */
  tinfo->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  tinfo->attr.exclude_kernel = 1;
  tinfo->attr.exclude_hv = 1;
  tinfo->attr.exclude_idle = 1;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
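  /* For example, a request for 5 (101b) pages becomes 6 (110b) at
     pg == 0, then 8 (1000b) at pg == 1, and the loop exits at pg == 3
     with PAGES == 8, the next power of two.  */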
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
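
      /* The mapping is one page larger than the data buffer: the first
         page holds the perf_event_mmap_page header.  */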
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  tinfo->pev.size = size;
  tinfo->pev.data_head = &header->data_head;
  tinfo->pev.mem = (const uint8_t *) data.release () + data_offset;
  tinfo->pev.last_head = 0ull;
  tinfo->header = header;
  tinfo->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.  */

static int
perf_event_pt_event_type ()
{
  static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    switch (errno)
      {
      case EACCES:
      case EFAULT:
      case EPERM:
        error (_("Failed to open %s (%s).  You do not have permission "
                 "to use Intel PT."), filename, safe_strerror (errno));

      case ENOTDIR:
      case ENOENT:
        error (_("Failed to open %s (%s).  Your system does not support "
                 "Intel PT."), filename, safe_strerror (errno));

      default:
        error (_("Failed to open %s: %s."), filename, safe_strerror (errno));
      }

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  size_t pages;
  int pid, pg;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  std::unique_ptr<linux_btrace_target_info> tinfo
    { std::make_unique<linux_btrace_target_info> (ptid) };

  tinfo->conf.format = BTRACE_FORMAT_PT;

  tinfo->attr.size = sizeof (tinfo->attr);
  tinfo->attr.type = perf_event_pt_event_type ();

  tinfo->attr.exclude_kernel = 1;
  tinfo->attr.exclude_hv = 1;
  tinfo->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;
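
  /* This places the AUX buffer directly behind the regular data buffer
     in the file mapping; the kernel picks up the offset and size from
     the header when the AUX area is mmapped below.  */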

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  tinfo->pev.size = aux.size ();
  tinfo->pev.mem = (const uint8_t *) aux.release ();
  tinfo->pev.data_head = &header->aux_head;
  tinfo->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (tinfo->header == header);
  tinfo->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) tinfo->pev.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}

/* Disable BTS tracing.  */

static void
linux_disable_bts (struct linux_btrace_target_info *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->pev.size + PAGE_SIZE);
  close (tinfo->file);
}

/* Disable Intel Processor Trace tracing.  */

static void
linux_disable_pt (struct linux_btrace_target_info *tinfo)
{
  munmap ((void *) tinfo->pev.mem, tinfo->pev.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *gtinfo)
{
  linux_btrace_target_info *tinfo
    = get_linux_btrace_target_info (gtinfo);

  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      linux_disable_bts (tinfo);
      delete tinfo;
      return BTRACE_ERR_NONE;

    case BTRACE_FORMAT_PT:
      linux_disable_pt (tinfo);
      delete tinfo;
      return BTRACE_ERR_NONE;
    }

  return BTRACE_ERR_NOT_SUPPORTED;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (btrace_data_bts *btrace, linux_btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head = 0, data_tail;
  unsigned int retries = 5;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (&tinfo->pev))
    return BTRACE_ERR_NONE;

  buffer_size = tinfo->pev.size;
  data_tail = tinfo->pev.last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *tinfo->pev.data_head;

      /* Delete any leftover trace from the previous iteration.  */
      delete btrace->blocks;
      btrace->blocks = nullptr;

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = tinfo->pev.mem;
      start = begin + data_head % buffer_size;
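
      /* If the buffer has not wrapped yet, valid samples occupy only
         [begin, start), so the backwards traversal must not wrap either;
         otherwise the whole buffer is valid and END is its physical
         end.  */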
      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + tinfo->pev.size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel is still writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *tinfo->pev.data_head)
        break;
    }

  tinfo->pev.last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!btrace->blocks->empty () && type != BTRACE_READ_DELTA)
    btrace->blocks->pop_back ();

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (btrace_data_pt *btrace, linux_btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (&tinfo->pev))
        return BTRACE_ERR_NONE;
      [[fallthrough]];
    case BTRACE_READ_ALL:
      perf_event_read_all (&tinfo->pev, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (_("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *gtinfo,
                   enum btrace_read_type type)
{
  linux_btrace_target_info *tinfo
    = get_linux_btrace_target_info (gtinfo);

  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (_("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */