1 /* elf.c -- Get debug data from an ELF file for backtraces.
2 Copyright (C) 2012-2023 Free Software Foundation, Inc.
3 Written by Ian Lance Taylor, Google.
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are
9 (1) Redistributions of source code must retain the above copyright
10 notice, this list of conditions and the following disclaimer.
12 (2) Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in
14 the documentation and/or other materials provided with the
17 (3) The name of the author may not be used to
18 endorse or promote products derived from this software without
19 specific prior written permission.
21 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
25 INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
30 IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 POSSIBILITY OF SUCH DAMAGE. */
38 #include <sys/types.h>
42 #ifdef HAVE_DL_ITERATE_PHDR
46 #ifdef HAVE_SYS_LINK_H
51 #include "backtrace.h"
56 #define S_IFLNK 0120000
59 #define S_IFMT 0170000
61 #define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
65 #define __builtin_prefetch(p, r, l)
66 #define unlikely(x) (x)
68 #define unlikely(x) __builtin_expect(!!(x), 0)
/* NOTE(review): this file is a corrupted listing -- upstream line
   numbers are embedded in the text, statements are split across
   physical lines, and interior lines (return type, braces, loop body,
   return statement) are elided.  Visible intent: a local strnlen
   replacement scanning S for up to MAXLEN bytes, then #define'd over
   strnlen.  Restore from a pristine copy before building.  */
71 #if !defined(HAVE_DECL_STRNLEN) || !HAVE_DECL_STRNLEN
73 /* If strnlen is not declared, provide our own version. */
76 xstrnlen (const char *s
, size_t maxlen
)
80 for (i
= 0; i
< maxlen
; ++i
)
86 #define strnlen xstrnlen
92 /* Dummy version of lstat for systems that don't have it. */
/* NOTE(review): corrupted fragment -- return type, body and the
   #define that maps lstat to this stub are elided from this listing.
   Per the comment above it is a no-op fallback.  */
95 xlstat (const char *path ATTRIBUTE_UNUSED
, struct stat
*st ATTRIBUTE_UNUSED
)
104 #ifndef HAVE_READLINK
106 /* Dummy version of readlink for systems that don't have it. */
/* NOTE(review): corrupted fragment -- return type and body are
   elided.  The visible #define below maps readlink to this stub on
   systems without the real call.  */
109 xreadlink (const char *path ATTRIBUTE_UNUSED
, char *buf ATTRIBUTE_UNUSED
,
110 size_t bufsz ATTRIBUTE_UNUSED
)
115 #define readlink xreadlink
119 #ifndef HAVE_DL_ITERATE_PHDR
121 /* Dummy version of dl_iterate_phdr for systems that don't have it. */
/* NOTE(review): corrupted fragment -- the struct dl_phdr_info
   opening, most of its members, and the function body are elided.
   Visible: a dlpi_name member and a stub dl_iterate_phdr that per the
   comment does nothing on systems without the real call.  */
123 #define dl_phdr_info x_dl_phdr_info
124 #define dl_iterate_phdr x_dl_iterate_phdr
129 const char *dlpi_name
;
133 dl_iterate_phdr (int (*callback
) (struct dl_phdr_info
*,
134 size_t, void *) ATTRIBUTE_UNUSED
,
135 void *data ATTRIBUTE_UNUSED
)
140 #endif /* ! defined (HAVE_DL_ITERATE_PHDR) */
142 /* The configure script must tell us whether we are 32-bit or 64-bit
143 ELF. We could make this code test and support either possibility,
144 but there is no point. This code only works for the currently
145 running executable, which means that we know the ELF mode at
148 #if BACKTRACE_ELF_SIZE != 32 && BACKTRACE_ELF_SIZE != 64
149 #error "Unknown BACKTRACE_ELF_SIZE"
152 /* <link.h> might #include <elf.h> which might define our constants
153 with slightly different values. Undefine them to be safe. */
182 #undef SHF_COMPRESSED
185 #undef NT_GNU_BUILD_ID
186 #undef ELFCOMPRESS_ZLIB
187 #undef ELFCOMPRESS_ZSTD
191 typedef uint16_t b_elf_half
; /* Elf_Half. */
192 typedef uint32_t b_elf_word
; /* Elf_Word. */
193 typedef int32_t b_elf_sword
; /* Elf_Sword. */
195 #if BACKTRACE_ELF_SIZE == 32
197 typedef uint32_t b_elf_addr
; /* Elf_Addr. */
198 typedef uint32_t b_elf_off
; /* Elf_Off. */
200 typedef uint32_t b_elf_wxword
; /* 32-bit Elf_Word, 64-bit ELF_Xword. */
204 typedef uint64_t b_elf_addr
; /* Elf_Addr. */
205 typedef uint64_t b_elf_off
; /* Elf_Off. */
206 typedef uint64_t b_elf_xword
; /* Elf_Xword. */
207 typedef int64_t b_elf_sxword
; /* Elf_Sxword. */
209 typedef uint64_t b_elf_wxword
; /* 32-bit Elf_Word, 64-bit ELF_Xword. */
213 /* Data structures and associated constants. */
218 unsigned char e_ident
[EI_NIDENT
]; /* ELF "magic number" */
219 b_elf_half e_type
; /* Identifies object file type */
220 b_elf_half e_machine
; /* Specifies required architecture */
221 b_elf_word e_version
; /* Identifies object file version */
222 b_elf_addr e_entry
; /* Entry point virtual address */
223 b_elf_off e_phoff
; /* Program header table file offset */
224 b_elf_off e_shoff
; /* Section header table file offset */
225 b_elf_word e_flags
; /* Processor-specific flags */
226 b_elf_half e_ehsize
; /* ELF header size in bytes */
227 b_elf_half e_phentsize
; /* Program header table entry size */
228 b_elf_half e_phnum
; /* Program header table entry count */
229 b_elf_half e_shentsize
; /* Section header table entry size */
230 b_elf_half e_shnum
; /* Section header table entry count */
231 b_elf_half e_shstrndx
; /* Section header string table index */
232 } b_elf_ehdr
; /* Elf_Ehdr. */
250 #define ELFDATA2LSB 1
251 #define ELFDATA2MSB 2
258 #define EF_PPC64_ABI 3
261 b_elf_word sh_name
; /* Section name, index in string tbl */
262 b_elf_word sh_type
; /* Type of section */
263 b_elf_wxword sh_flags
; /* Miscellaneous section attributes */
264 b_elf_addr sh_addr
; /* Section virtual addr at execution */
265 b_elf_off sh_offset
; /* Section file offset */
266 b_elf_wxword sh_size
; /* Size of section in bytes */
267 b_elf_word sh_link
; /* Index of another section */
268 b_elf_word sh_info
; /* Additional section information */
269 b_elf_wxword sh_addralign
; /* Section alignment */
270 b_elf_wxword sh_entsize
; /* Entry size if section holds table */
271 } b_elf_shdr
; /* Elf_Shdr. */
273 #define SHN_UNDEF 0x0000 /* Undefined section */
274 #define SHN_LORESERVE 0xFF00 /* Begin range of reserved indices */
275 #define SHN_XINDEX 0xFFFF /* Section index is held elsewhere */
277 #define SHT_PROGBITS 1
280 #define SHT_DYNSYM 11
282 #define SHF_COMPRESSED 0x800
284 #if BACKTRACE_ELF_SIZE == 32
288 b_elf_word st_name
; /* Symbol name, index in string tbl */
289 b_elf_addr st_value
; /* Symbol value */
290 b_elf_word st_size
; /* Symbol size */
291 unsigned char st_info
; /* Symbol binding and type */
292 unsigned char st_other
; /* Visibility and other data */
293 b_elf_half st_shndx
; /* Symbol section index */
294 } b_elf_sym
; /* Elf_Sym. */
296 #else /* BACKTRACE_ELF_SIZE != 32 */
300 b_elf_word st_name
; /* Symbol name, index in string tbl */
301 unsigned char st_info
; /* Symbol binding and type */
302 unsigned char st_other
; /* Visibility and other data */
303 b_elf_half st_shndx
; /* Symbol section index */
304 b_elf_addr st_value
; /* Symbol value */
305 b_elf_xword st_size
; /* Symbol size */
306 } b_elf_sym
; /* Elf_Sym. */
308 #endif /* BACKTRACE_ELF_SIZE != 32 */
321 #define NT_GNU_BUILD_ID 3
323 #if BACKTRACE_ELF_SIZE == 32
327 b_elf_word ch_type
; /* Compresstion algorithm */
328 b_elf_word ch_size
; /* Uncompressed size */
329 b_elf_word ch_addralign
; /* Alignment for uncompressed data */
330 } b_elf_chdr
; /* Elf_Chdr */
332 #else /* BACKTRACE_ELF_SIZE != 32 */
336 b_elf_word ch_type
; /* Compression algorithm */
337 b_elf_word ch_reserved
; /* Reserved */
338 b_elf_xword ch_size
; /* Uncompressed size */
339 b_elf_xword ch_addralign
; /* Alignment for uncompressed data */
340 } b_elf_chdr
; /* Elf_Chdr */
342 #endif /* BACKTRACE_ELF_SIZE != 32 */
344 #define ELFCOMPRESS_ZLIB 1
345 #define ELFCOMPRESS_ZSTD 2
347 /* Names of sections, indexed by enum dwarf_section in internal.h. */
349 static const char * const dwarf_section_names
[DEBUG_MAX
] =
357 ".debug_str_offsets",
362 /* Information we gather for the sections we care about. */
364 struct debug_section_info
366 /* Section file offset. */
370 /* Section contents, after read from file. */
371 const unsigned char *data
;
372 /* Whether the SHF_COMPRESSED flag is set for the section. */
376 /* Information we keep for an ELF symbol. */
380 /* The name of the symbol. */
382 /* The address of the symbol. */
384 /* The size of the symbol. */
388 /* Information to pass to elf_syminfo. */
390 struct elf_syminfo_data
392 /* Symbols for the next module. */
393 struct elf_syminfo_data
*next
;
394 /* The ELF symbols, sorted by address. */
395 struct elf_symbol
*symbols
;
396 /* The number of symbols. */
400 /* A view that works for either a file or memory. */
404 struct backtrace_view view
;
405 int release
; /* If non-zero, must call backtrace_release_view. */
408 /* Information about PowerPC64 ELFv1 .opd section. */
410 struct elf_ppc64_opd_data
412 /* Address of the .opd section. */
416 /* Size of the .opd section. */
418 /* Corresponding section view. */
419 struct elf_view view
;
422 /* Create a view of SIZE bytes from DESCRIPTOR/MEMORY at OFFSET. */
/* NOTE(review): corrupted fragment -- return type, braces and the
   descriptor-vs-memory conditional are elided.  Visible logic: with a
   file descriptor, delegate to backtrace_get_view; for an in-memory
   image, range-check OFFSET+SIZE against MEMORY_SIZE (reporting "out
   of range for in-memory file" on failure) and point view->view.data
   directly at MEMORY + OFFSET with base NULL and len SIZE.  */
425 elf_get_view (struct backtrace_state
*state
, int descriptor
,
426 const unsigned char *memory
, size_t memory_size
, off_t offset
,
427 uint64_t size
, backtrace_error_callback error_callback
,
428 void *data
, struct elf_view
*view
)
433 return backtrace_get_view (state
, descriptor
, offset
, size
,
434 error_callback
, data
, &view
->view
);
438 if ((uint64_t) offset
+ size
> (uint64_t) memory_size
)
440 error_callback (data
, "out of range for in-memory file", 0);
443 view
->view
.data
= (const void *) (memory
+ offset
);
444 view
->view
.base
= NULL
;
445 view
->view
.len
= size
;
451 /* Release a view read by elf_get_view. */
/* NOTE(review): corrupted fragment -- the guard that presumably tests
   view->release before freeing is elided.  Visible logic: forward to
   backtrace_release_view on the embedded backtrace_view.  */
454 elf_release_view (struct backtrace_state
*state
, struct elf_view
*view
,
455 backtrace_error_callback error_callback
, void *data
)
458 backtrace_release_view (state
, &view
->view
, error_callback
, data
);
461 /* Compute the CRC-32 of BUF/LEN. This uses the CRC used for
462 .gnu_debuglink files. */
/* NOTE(review): corrupted fragment -- the last row of the 256-entry
   CRC table (upstream line 520) and the final return are elided, so
   the table below holds only 255 visible entries.  Visible logic is
   the standard byte-at-a-time table-driven CRC-32 update loop.  */
465 elf_crc32 (uint32_t crc
, const unsigned char *buf
, size_t len
)
467 static const uint32_t crc32_table
[256] =
469 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
470 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
471 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
472 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
473 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
474 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
475 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
476 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
477 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
478 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
479 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
480 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
481 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
482 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
483 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
484 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
485 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
486 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
487 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
488 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
489 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
490 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
491 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
492 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
493 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
494 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
495 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
496 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
497 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
498 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
499 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
500 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
501 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
502 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
503 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
504 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
505 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
506 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
507 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
508 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
509 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
510 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
511 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
512 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
513 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
514 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
515 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
516 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
517 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
518 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
519 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
522 const unsigned char *end
;
525 for (end
= buf
+ len
; buf
< end
; ++ buf
)
526 crc
= crc32_table
[(crc
^ *buf
) & 0xff] ^ (crc
>> 8);
530 /* Return the CRC-32 of the entire file open at DESCRIPTOR. */
/* NOTE(review): corrupted fragment -- return statements, error paths
   and the view-failure branch are elided.  Visible logic: fstat the
   descriptor (reporting "fstat" with errno on failure), map the whole
   file with backtrace_get_view, CRC it via elf_crc32 starting from 0,
   then release the view.  */
533 elf_crc32_file (struct backtrace_state
*state
, int descriptor
,
534 backtrace_error_callback error_callback
, void *data
)
537 struct backtrace_view file_view
;
540 if (fstat (descriptor
, &st
) < 0)
542 error_callback (data
, "fstat", errno
);
546 if (!backtrace_get_view (state
, descriptor
, 0, st
.st_size
, error_callback
,
550 ret
= elf_crc32 (0, (const unsigned char *) file_view
.data
, st
.st_size
);
552 backtrace_release_view (state
, &file_view
, error_callback
, data
);
557 /* A dummy callback function used when we can't find a symbol
/* NOTE(review): corrupted fragment -- the comment above is truncated
   and the return type/braces are elided.  Visible behavior: report
   "no symbol table in ELF executable" through ERROR_CALLBACK.  */
561 elf_nosyms (struct backtrace_state
*state ATTRIBUTE_UNUSED
,
562 uintptr_t addr ATTRIBUTE_UNUSED
,
563 backtrace_syminfo_callback callback ATTRIBUTE_UNUSED
,
564 backtrace_error_callback error_callback
, void *data
)
566 error_callback (data
, "no symbol table in ELF executable", -1);
569 /* A callback function used when we can't find any debug info. */
/* NOTE(review): corrupted fragment -- braces, the return after the
   syminfo path, and the enclosing scope are elided.  Visible logic:
   if a real syminfo_fn is installed (not the elf_nosyms dummy), fall
   back to symbol-table lookup via backtrace_syminfo_to_full_callback
   so callers at least get a symbol name; otherwise report "no debug
   info in ELF executable".  */
572 elf_nodebug (struct backtrace_state
*state
, uintptr_t pc
,
573 backtrace_full_callback callback
,
574 backtrace_error_callback error_callback
, void *data
)
576 if (state
->syminfo_fn
!= NULL
&& state
->syminfo_fn
!= elf_nosyms
)
578 struct backtrace_call_full bdata
;
580 /* Fetch symbol information so that we can least get the
583 bdata
.full_callback
= callback
;
584 bdata
.full_error_callback
= error_callback
;
585 bdata
.full_data
= data
;
587 state
->syminfo_fn (state
, pc
, backtrace_syminfo_to_full_callback
,
588 backtrace_syminfo_to_full_error_callback
, &bdata
);
592 error_callback (data
, "no debug info in ELF executable", -1);
596 /* Compare struct elf_symbol for qsort. */
/* NOTE(review): corrupted fragment -- the return statements for the
   less-than / greater-than / equal branches are elided.  Visible
   logic orders two elf_symbol entries by their address field.  */
599 elf_symbol_compare (const void *v1
, const void *v2
)
601 const struct elf_symbol
*e1
= (const struct elf_symbol
*) v1
;
602 const struct elf_symbol
*e2
= (const struct elf_symbol
*) v2
;
604 if (e1
->address
< e2
->address
)
606 else if (e1
->address
> e2
->address
)
612 /* Compare an ADDR against an elf_symbol for bsearch. We allocate one
613 extra entry in the array so that this can look safely at the next
/* NOTE(review): corrupted fragment -- the line dereferencing KEY into
   the local ADDR and the return statements are elided.  Visible
   logic: return <0 when ADDR is below the entry, >0 when at or past
   entry->address + entry->size, i.e. a half-open range match.  */
617 elf_symbol_search (const void *vkey
, const void *ventry
)
619 const uintptr_t *key
= (const uintptr_t *) vkey
;
620 const struct elf_symbol
*entry
= (const struct elf_symbol
*) ventry
;
624 if (addr
< entry
->address
)
626 else if (addr
>= entry
->address
+ entry
->size
)
632 /* Initialize the symbol table info for elf_syminfo. */
/* NOTE(review): corrupted fragment -- braces, `continue` statements,
   the index variable J's increments, the qsort comparator argument
   and the final return are elided.  Visible logic: two passes over
   the b_elf_sym entries in SYMTAB_DATA -- first count STT_FUNC /
   STT_OBJECT symbols with a defined section, then allocate that many
   struct elf_symbol records, fill in name (bounds-checked against
   STRTAB_SIZE, reporting "symbol string index out of range"),
   address (special-cased through the PowerPC64 ELFv1 .opd descriptor
   table when OPD covers st_value, then biased by BASE_ADDRESS) and
   size, sort by address, and publish the array into SDATA.  */
635 elf_initialize_syminfo (struct backtrace_state
*state
,
636 uintptr_t base_address
,
637 const unsigned char *symtab_data
, size_t symtab_size
,
638 const unsigned char *strtab
, size_t strtab_size
,
639 backtrace_error_callback error_callback
,
640 void *data
, struct elf_syminfo_data
*sdata
,
641 struct elf_ppc64_opd_data
*opd
)
644 const b_elf_sym
*sym
;
645 size_t elf_symbol_count
;
646 size_t elf_symbol_size
;
647 struct elf_symbol
*elf_symbols
;
651 sym_count
= symtab_size
/ sizeof (b_elf_sym
);
653 /* We only care about function symbols. Count them. */
654 sym
= (const b_elf_sym
*) symtab_data
;
655 elf_symbol_count
= 0;
656 for (i
= 0; i
< sym_count
; ++i
, ++sym
)
660 info
= sym
->st_info
& 0xf;
661 if ((info
== STT_FUNC
|| info
== STT_OBJECT
)
662 && sym
->st_shndx
!= SHN_UNDEF
)
666 elf_symbol_size
= elf_symbol_count
* sizeof (struct elf_symbol
);
667 elf_symbols
= ((struct elf_symbol
*)
668 backtrace_alloc (state
, elf_symbol_size
, error_callback
,
670 if (elf_symbols
== NULL
)
673 sym
= (const b_elf_sym
*) symtab_data
;
675 for (i
= 0; i
< sym_count
; ++i
, ++sym
)
679 info
= sym
->st_info
& 0xf;
680 if (info
!= STT_FUNC
&& info
!= STT_OBJECT
)
682 if (sym
->st_shndx
== SHN_UNDEF
)
684 if (sym
->st_name
>= strtab_size
)
686 error_callback (data
, "symbol string index out of range", 0);
687 backtrace_free (state
, elf_symbols
, elf_symbol_size
, error_callback
,
691 elf_symbols
[j
].name
= (const char *) strtab
+ sym
->st_name
;
692 /* Special case PowerPC64 ELFv1 symbols in .opd section, if the symbol
693 is a function descriptor, read the actual code address from the
696 && sym
->st_value
>= opd
->addr
697 && sym
->st_value
< opd
->addr
+ opd
->size
)
698 elf_symbols
[j
].address
699 = *(const b_elf_addr
*) (opd
->data
+ (sym
->st_value
- opd
->addr
));
701 elf_symbols
[j
].address
= sym
->st_value
;
702 elf_symbols
[j
].address
+= base_address
;
703 elf_symbols
[j
].size
= sym
->st_size
;
707 backtrace_qsort (elf_symbols
, elf_symbol_count
, sizeof (struct elf_symbol
),
711 sdata
->symbols
= elf_symbols
;
712 sdata
->count
= elf_symbol_count
;
717 /* Add EDATA to the list in STATE. */
/* NOTE(review): corrupted fragment -- the loop bodies, the list
   append in the unthreaded branch, and the retry loop around the
   compare-and-swap are elided.  Visible logic: unthreaded case walks
   the syminfo_data list to its tail; threaded case walks it with
   atomic loads and appends EDATA via
   __sync_bool_compare_and_swap.  */
720 elf_add_syminfo_data (struct backtrace_state
*state
,
721 struct elf_syminfo_data
*edata
)
723 if (!state
->threaded
)
725 struct elf_syminfo_data
**pp
;
727 for (pp
= (struct elf_syminfo_data
**) (void *) &state
->syminfo_data
;
737 struct elf_syminfo_data
**pp
;
739 pp
= (struct elf_syminfo_data
**) (void *) &state
->syminfo_data
;
743 struct elf_syminfo_data
*p
;
745 p
= backtrace_atomic_load_pointer (pp
);
753 if (__sync_bool_compare_and_swap (pp
, NULL
, edata
))
759 /* Return the symbol name and value for an ADDR. */
/* NOTE(review): corrupted fragment -- loop conditions/continuations,
   the found-symbol breaks, and the trailing `void *data` parameter
   line are elided.  Visible logic: walk the per-module
   elf_syminfo_data list (plain reads when unthreaded, atomic loads
   when threaded), bsearch each module's sorted symbol array with
   elf_symbol_search, then invoke CALLBACK with the match's
   name/address/size, or with NULL if nothing matched.  */
762 elf_syminfo (struct backtrace_state
*state
, uintptr_t addr
,
763 backtrace_syminfo_callback callback
,
764 backtrace_error_callback error_callback ATTRIBUTE_UNUSED
,
767 struct elf_syminfo_data
*edata
;
768 struct elf_symbol
*sym
= NULL
;
770 if (!state
->threaded
)
772 for (edata
= (struct elf_syminfo_data
*) state
->syminfo_data
;
776 sym
= ((struct elf_symbol
*)
777 bsearch (&addr
, edata
->symbols
, edata
->count
,
778 sizeof (struct elf_symbol
), elf_symbol_search
));
785 struct elf_syminfo_data
**pp
;
787 pp
= (struct elf_syminfo_data
**) (void *) &state
->syminfo_data
;
790 edata
= backtrace_atomic_load_pointer (pp
);
794 sym
= ((struct elf_symbol
*)
795 bsearch (&addr
, edata
->symbols
, edata
->count
,
796 sizeof (struct elf_symbol
), elf_symbol_search
));
805 callback (data
, addr
, NULL
, 0, 0);
807 callback (data
, addr
, sym
->name
, sym
->address
, sym
->size
);
810 /* Return whether FILENAME is a symlink. */
/* NOTE(review): corrupted fragment -- the struct stat declaration,
   the failure return and braces are elided.  Visible logic: lstat
   FILENAME and test S_ISLNK on st_mode.  */
813 elf_is_symlink (const char *filename
)
817 if (lstat (filename
, &st
) < 0)
819 return S_ISLNK (st
.st_mode
);
822 /* Return the results of reading the symlink FILENAME in a buffer
823 allocated by backtrace_alloc. Return the length of the buffer in
/* NOTE(review): corrupted fragment -- the grow-and-retry loop,
   the len initialization/doubling, NUL termination and return
   statements are elided.  Visible logic: allocate LEN bytes, call
   readlink into it, free and (presumably -- elided) retry with a
   larger buffer unless the result fit with room for a terminator
   ((size_t) rl < len - 1).  */
827 elf_readlink (struct backtrace_state
*state
, const char *filename
,
828 backtrace_error_callback error_callback
, void *data
,
839 buf
= backtrace_alloc (state
, len
, error_callback
, data
);
842 rl
= readlink (filename
, buf
, len
);
845 backtrace_free (state
, buf
, len
, error_callback
, data
);
848 if ((size_t) rl
< len
- 1)
854 backtrace_free (state
, buf
, len
, error_callback
, data
);
859 #define SYSTEM_BUILD_ID_DIR "/usr/lib/debug/.build-id/"
861 /* Open a separate debug info file, using the build ID to find it.
862 Returns an open file descriptor, or -1.
864 The GDB manual says that the only place gdb looks for a debug file
865 when the build ID is known is in /usr/lib/debug/.build-id. */
/* NOTE(review): corrupted fragment -- the gdb convention builds
   "<prefix>/xx/yyyy....debug": first build-id byte as a 2-hex-digit
   directory, a '/', remaining bytes, then ".debug".  In this listing
   the lines writing the '/' separator and extracting the LOW nibble
   (upstream line 900) are elided, which is why the second `*t++`
   below appears to reuse the high nibble -- restore from a pristine
   copy; do not "fix" in place.  Also elided: return statements and
   the descriptor check.  */
868 elf_open_debugfile_by_buildid (struct backtrace_state
*state
,
869 const char *buildid_data
, size_t buildid_size
,
870 backtrace_error_callback error_callback
,
873 const char * const prefix
= SYSTEM_BUILD_ID_DIR
;
874 const size_t prefix_len
= strlen (prefix
);
875 const char * const suffix
= ".debug";
876 const size_t suffix_len
= strlen (suffix
);
884 len
= prefix_len
+ buildid_size
* 2 + suffix_len
+ 2;
885 bd_filename
= backtrace_alloc (state
, len
, error_callback
, data
);
886 if (bd_filename
== NULL
)
890 memcpy (t
, prefix
, prefix_len
);
892 for (i
= 0; i
< buildid_size
; i
++)
897 b
= (unsigned char) buildid_data
[i
];
898 nib
= (b
& 0xf0) >> 4;
899 *t
++ = nib
< 10 ? '0' + nib
: 'a' + nib
- 10;
901 *t
++ = nib
< 10 ? '0' + nib
: 'a' + nib
- 10;
905 memcpy (t
, suffix
, suffix_len
);
906 t
[suffix_len
] = '\0';
908 ret
= backtrace_open (bd_filename
, error_callback
, data
, &does_not_exist
);
910 backtrace_free (state
, bd_filename
, len
, error_callback
, data
);
912 /* gdb checks that the debuginfo file has the same build ID note.
913 That seems kind of pointless to me--why would it have the right
914 name but not the right build ID?--so skipping the check. */
919 /* Try to open a file whose name is PREFIX (length PREFIX_LEN)
920 concatenated with PREFIX2 (length PREFIX2_LEN) concatenated with
921 DEBUGLINK_NAME. Returns an open file descriptor, or -1. */
/* NOTE(review): corrupted fragment -- the NULL check after
   backtrace_alloc and the return statements are elided.  Visible
   logic: build PREFIX + PREFIX2 + DEBUGLINK_NAME + NUL in a temporary
   buffer, attempt backtrace_open on it, free the buffer, and
   (presumably -- elided) return the descriptor.  */
924 elf_try_debugfile (struct backtrace_state
*state
, const char *prefix
,
925 size_t prefix_len
, const char *prefix2
, size_t prefix2_len
,
926 const char *debuglink_name
,
927 backtrace_error_callback error_callback
, void *data
)
929 size_t debuglink_len
;
935 debuglink_len
= strlen (debuglink_name
);
936 try_len
= prefix_len
+ prefix2_len
+ debuglink_len
+ 1;
937 try = backtrace_alloc (state
, try_len
, error_callback
, data
);
941 memcpy (try, prefix
, prefix_len
);
942 memcpy (try + prefix_len
, prefix2
, prefix2_len
);
943 memcpy (try + prefix_len
+ prefix2_len
, debuglink_name
, debuglink_len
);
944 try[prefix_len
+ prefix2_len
+ debuglink_len
] = '\0';
946 ret
= backtrace_open (try, error_callback
, data
, &does_not_exist
);
948 backtrace_free (state
, try, try_len
, error_callback
, data
);
953 /* Find a separate debug info file, using the debuglink section data
954 to find it. Returns an open file descriptor, or -1. */
/* NOTE(review): corrupted fragment -- the locals, the symlink-loop
   bookkeeping that tracks the allocated name in ALC/ALC_LEN, several
   NULL checks, the filename reassignment after each readlink, and all
   `goto`/return paths are elided.  Visible search order: resolve
   FILENAME's trailing symlinks (absolute targets replace the name,
   relative targets are spliced after the last '/'), then try
   DEBUGLINK_NAME (1) next to FILENAME, (2) in a ".debug/"
   subdirectory, (3) under "/usr/lib/debug/" + the directory prefix;
   finally free the resolved-name buffer.  */
957 elf_find_debugfile_by_debuglink (struct backtrace_state
*state
,
958 const char *filename
,
959 const char *debuglink_name
,
960 backtrace_error_callback error_callback
,
971 /* Resolve symlinks in FILENAME. Since FILENAME is fairly likely to
972 be /proc/self/exe, symlinks are common. We don't try to resolve
973 the whole path name, just the base name. */
977 while (elf_is_symlink (filename
))
982 new_buf
= elf_readlink (state
, filename
, error_callback
, data
, &new_len
);
986 if (new_buf
[0] == '/')
990 slash
= strrchr (filename
, '/');
999 clen
= slash
- filename
+ strlen (new_buf
) + 1;
1000 c
= backtrace_alloc (state
, clen
, error_callback
, data
);
1004 memcpy (c
, filename
, slash
- filename
);
1005 memcpy (c
+ (slash
- filename
), new_buf
, strlen (new_buf
));
1006 c
[slash
- filename
+ strlen (new_buf
)] = '\0';
1007 backtrace_free (state
, new_buf
, new_len
, error_callback
, data
);
1015 backtrace_free (state
, alc
, alc_len
, error_callback
, data
);
1020 /* Look for DEBUGLINK_NAME in the same directory as FILENAME. */
1022 slash
= strrchr (filename
, '/');
1032 prefix_len
= slash
- filename
;
1035 ddescriptor
= elf_try_debugfile (state
, prefix
, prefix_len
, "", 0,
1036 debuglink_name
, error_callback
, data
);
1037 if (ddescriptor
>= 0)
1043 /* Look for DEBUGLINK_NAME in a .debug subdirectory of FILENAME. */
1045 ddescriptor
= elf_try_debugfile (state
, prefix
, prefix_len
, ".debug/",
1046 strlen (".debug/"), debuglink_name
,
1047 error_callback
, data
);
1048 if (ddescriptor
>= 0)
1054 /* Look for DEBUGLINK_NAME in /usr/lib/debug. */
1056 ddescriptor
= elf_try_debugfile (state
, "/usr/lib/debug/",
1057 strlen ("/usr/lib/debug/"), prefix
,
1058 prefix_len
, debuglink_name
,
1059 error_callback
, data
);
1060 if (ddescriptor
>= 0)
1064 if (alc
!= NULL
&& alc_len
> 0)
1065 backtrace_free (state
, alc
, alc_len
, error_callback
, data
);
1069 /* Open a separate debug info file, using the debuglink section data
1070 to find it. Returns an open file descriptor, or -1. */
/* NOTE(review): corrupted fragment -- the trailing `void *data`
   parameter line, the debuglink_name argument to the find call, and
   the return statements are elided.  Visible logic: locate the debug
   file via elf_find_debugfile_by_debuglink; if DEBUGLINK_CRC is
   nonzero, CRC the opened file with elf_crc32_file and close/reject
   it on mismatch.  */
1073 elf_open_debugfile_by_debuglink (struct backtrace_state
*state
,
1074 const char *filename
,
1075 const char *debuglink_name
,
1076 uint32_t debuglink_crc
,
1077 backtrace_error_callback error_callback
,
1082 ddescriptor
= elf_find_debugfile_by_debuglink (state
, filename
,
1084 error_callback
, data
);
1085 if (ddescriptor
< 0)
1088 if (debuglink_crc
!= 0)
1092 got_crc
= elf_crc32_file (state
, ddescriptor
, error_callback
, data
);
1093 if (got_crc
!= debuglink_crc
)
1095 backtrace_close (ddescriptor
, error_callback
, data
);
1103 /* A function useful for setting a breakpoint for an inflation failure
1104 when this code is compiled with -g. */
/* NOTE(review): corrupted fragment -- the return type and (per the
   comment above, deliberately empty) body are elided from this
   listing.  */
1107 elf_uncompress_failed(void)
1111 /* *PVAL is the current value being read from the stream, and *PBITS
1112 is the number of valid bits. Ensure that *PVAL holds at least 15
1113 bits by reading additional bits from *PPIN, up to PINEND, as
1114 needed. Updates *PPIN, *PVAL and *PBITS. Returns 1 on success, 0
/* NOTE(review): corrupted fragment -- the locals (next/val/bits),
   the early-out when *PBITS is already sufficient, the pin += 4 /
   bits += 32 updates, write-back of the outputs and returns are
   elided, along with #else/#endif lines of the byte-order block.
   Visible logic: bounds-check 4 bytes remain, load a 32-bit
   little-endian word (direct aligned load + bswap on big-endian,
   byte assembly otherwise), OR it into the bit buffer above the
   current bits, and prefetch the following bytes.  */
1118 elf_fetch_bits (const unsigned char **ppin
, const unsigned char *pinend
,
1119 uint64_t *pval
, unsigned int *pbits
)
1122 const unsigned char *pin
;
1132 if (unlikely (pinend
- pin
< 4))
1134 elf_uncompress_failed ();
1138 #if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \
1139 && defined(__ORDER_BIG_ENDIAN__) \
1140 && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \
1141 || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
1142 /* We've ensured that PIN is aligned. */
1143 next
= *(const uint32_t *)pin
;
1145 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1146 next
= __builtin_bswap32 (next
);
1149 next
= pin
[0] | (pin
[1] << 8) | (pin
[2] << 16) | (pin
[3] << 24);
1152 val
|= (uint64_t)next
<< bits
;
1156 /* We will need the next four bytes soon. */
1157 __builtin_prefetch (pin
, 0, 0);
1165 /* This is like elf_fetch_bits, but it fetchs the bits backward, and ensures at
1166 least 16 bits. This is for zstd. */
/* NOTE(review): corrupted fragment -- locals, the early-out when
   enough bits are buffered, the pin -= 4 step before the load, the
   shift that merges NEXT below the existing bits, output write-back
   and returns are elided.  Note PINEND here marks the START of the
   backward stream (hence the pin <= pinend underrun check and the
   partial-word discard when pin < pinend).  */
1169 elf_fetch_bits_backward (const unsigned char **ppin
,
1170 const unsigned char *pinend
,
1171 uint64_t *pval
, unsigned int *pbits
)
1174 const unsigned char *pin
;
1184 if (unlikely (pin
<= pinend
))
1188 elf_uncompress_failed ();
1196 #if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \
1197 && defined(__ORDER_BIG_ENDIAN__) \
1198 && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \
1199 || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
1200 /* We've ensured that PIN is aligned. */
1201 next
= *(const uint32_t *)pin
;
1203 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1204 next
= __builtin_bswap32 (next
);
1207 next
= pin
[0] | (pin
[1] << 8) | (pin
[2] << 16) | (pin
[3] << 24);
1214 if (unlikely (pin
< pinend
))
1216 val
>>= (pinend
- pin
) * 8;
1217 bits
-= (pinend
- pin
) * 8;
1226 /* Initialize backward fetching when the bitstream starts with a 1 bit in the
1227 last byte in memory (which is the first one that we read). This is used by
1228 zstd decompression. Returns 1 on success, 0 on error. */
/* NOTE(review): corrupted fragment -- initialization of pin/val/bits,
   the --pin / shift steps inside both accumulation loops, write-back
   of *ppin/*pval/*pbits between the two backward fetches, and returns
   are elided.  Visible logic: read the stream's final byte as
   STREAM_START (zero is invalid), walk backward to a 32-bit boundary
   accumulating bytes, top up via elf_fetch_bits_backward, then use
   __builtin_clz on STREAM_START to drop the padding bits above the
   leading 1 marker bit before a second top-up.  */
1231 elf_fetch_backward_init (const unsigned char **ppin
,
1232 const unsigned char *pinend
,
1233 uint64_t *pval
, unsigned int *pbits
)
1235 const unsigned char *pin
;
1236 unsigned int stream_start
;
1241 stream_start
= (unsigned int)*pin
;
1242 if (unlikely (stream_start
== 0))
1244 elf_uncompress_failed ();
1250 /* Align to a 32-bit boundary. */
1251 while ((((uintptr_t)pin
) & 3) != 0)
1254 val
|= (uint64_t)*pin
;
1260 val
|= (uint64_t)*pin
;
1266 if (!elf_fetch_bits_backward (ppin
, pinend
, pval
, pbits
))
1269 *pbits
-= __builtin_clz (stream_start
) - (sizeof (unsigned int) - 1) * 8 + 1;
1271 if (!elf_fetch_bits_backward (ppin
, pinend
, pval
, pbits
))
1277 /* Huffman code tables, like the rest of the zlib format, are defined
1278 by RFC 1951. We store a Huffman code table as a series of tables
1279 stored sequentially in memory. Each entry in a table is 16 bits.
1280 The first, main, table has 256 entries. It is followed by a set of
1281 secondary tables of length 2 to 128 entries. The maximum length of
1282 a code sequence in the deflate format is 15 bits, so that is all we
1283 need. Each secondary table has an index, which is the offset of
1284 the table in the overall memory storage.
1286 The deflate format says that all codes of a given bit length are
1287 lexicographically consecutive. Perhaps we could have 130 values
1288 that require a 15-bit code, perhaps requiring three secondary
1289 tables of size 128. I don't know if this is actually possible, but
1290 it suggests that the maximum size required for secondary tables is
1291 3 * 128 + 3 * 64 ... == 768. The zlib enough program reports 660
1292 as the maximum. We permit 768, since in addition to the 256 for
1293 the primary table, with two bytes per entry, and with the two
1294 tables we need, that gives us a page.
1296 A single table entry needs to store a value or (for the main table
1297 only) the index and size of a secondary table. Values range from 0
1298 to 285, inclusive. Secondary table indexes, per above, range from
1299 0 to 510. For a value we need to store the number of bits we need
1300 to determine that value (one value may appear multiple times in the
1301 table), which is 1 to 8. For a secondary table we need to store
1302 the number of bits used to index into the table, which is 1 to 7.
1303 And of course we need 1 bit to decide whether we have a value or a
1304 secondary table index. So each entry needs 9 bits for value/table
1305 index, 3 bits for size, 1 bit what it is. For simplicity we use 16
1308 /* Number of entries we allocate to for one code table. We get a page
1309 for the two code tables we need. */
1311 #define ZLIB_HUFFMAN_TABLE_SIZE (1024)
1313 /* Bit masks and shifts for the values in the table. */
1315 #define ZLIB_HUFFMAN_VALUE_MASK 0x01ff
1316 #define ZLIB_HUFFMAN_BITS_SHIFT 9
1317 #define ZLIB_HUFFMAN_BITS_MASK 0x7
1318 #define ZLIB_HUFFMAN_SECONDARY_SHIFT 12
1320 /* For working memory while inflating we need two code tables, we need
1321 an array of code lengths (max value 15, so we use unsigned char),
1322 and an array of unsigned shorts used while building a table. The
1323 latter two arrays must be large enough to hold the maximum number
1324 of code lengths, which RFC 1951 defines as 286 + 30. */
1326 #define ZLIB_TABLE_SIZE \
1327 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \
1328 + (286 + 30) * sizeof (uint16_t) \
1329 + (286 + 30) * sizeof (unsigned char))
1331 #define ZLIB_TABLE_CODELEN_OFFSET \
1332 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \
1333 + (286 + 30) * sizeof (uint16_t))
1335 #define ZLIB_TABLE_WORK_OFFSET \
1336 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t))
1338 #ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
1340 /* Used by the main function that generates the fixed table to learn
1342 static size_t final_next_secondary
;
/* elf_zlib_inflate_table: build an 8-bit primary Huffman lookup table (and
   secondary tables for codes longer than 8 bits) from the code-length array
   CODES/CODES_LEN into *TABLE, using ZDEBUG_TABLE as scratch.  Returns 1 on
   success, 0 on error.  NOTE(review): many original lines (braces, returns,
   loop bodies) are elided in this extraction; do not compile as-is.  */
1346 /* Build a Huffman code table from an array of lengths in CODES of
1347 length CODES_LEN. The table is stored into *TABLE. ZDEBUG_TABLE
1348 is the same as for elf_zlib_inflate, used to find some work space.
1349 Returns 1 on success, 0 on error. */
1352 elf_zlib_inflate_table (unsigned char *codes
, size_t codes_len
,
1353 uint16_t *zdebug_table
, uint16_t *table
)
1358 uint16_t firstcode
[7];
1363 size_t next_secondary
;
1365 /* Count the number of code of each length. Set NEXT[val] to be the
1366 next value after VAL with the same bit length. */
1368 next
= (uint16_t *) (((unsigned char *) zdebug_table
)
1369 + ZLIB_TABLE_WORK_OFFSET
);
1371 memset (&count
[0], 0, 16 * sizeof (uint16_t));
1372 for (i
= 0; i
< codes_len
; ++i
)
1374 if (unlikely (codes
[i
] >= 16))
1376 elf_uncompress_failed ();
1380 if (count
[codes
[i
]] == 0)
1382 start
[codes
[i
]] = i
;
1387 next
[prev
[codes
[i
]]] = i
;
1394 /* For each length, fill in the table for the codes of that
1397 memset (table
, 0, ZLIB_HUFFMAN_TABLE_SIZE
* sizeof (uint16_t));
1399 /* Handle the values that do not require a secondary table. */
1402 for (j
= 1; j
<= 8; ++j
)
1411 if (unlikely (jcnt
> (1U << j
)))
1413 elf_uncompress_failed ();
1417 /* There are JCNT values that have this length, the values
1418 starting from START[j] continuing through NEXT[VAL]. Those
1419 values are assigned consecutive values starting at CODE. */
1422 for (i
= 0; i
< jcnt
; ++i
)
1428 /* In the compressed bit stream, the value VAL is encoded as
1429 J bits with the value C. */
1431 if (unlikely ((val
& ~ZLIB_HUFFMAN_VALUE_MASK
) != 0))
1433 elf_uncompress_failed ();
1437 tval
= val
| ((j
- 1) << ZLIB_HUFFMAN_BITS_SHIFT
);
1439 /* The table lookup uses 8 bits. If J is less than 8, we
1440 don't know what the other bits will be. We need to fill
1441 in all possibilities in the table. Since the Huffman
1442 code is unambiguous, those entries can't be used for any
1445 for (ind
= code
; ind
< 0x100; ind
+= 1 << j
)
1447 if (unlikely (table
[ind
] != 0))
1449 elf_uncompress_failed ();
1455 /* Advance to the next value with this length. */
1459 /* The Huffman codes are stored in the bitstream with the
1460 most significant bit first, as is required to make them
1461 unambiguous. The effect is that when we read them from
1462 the bitstream we see the bit sequence in reverse order:
1463 the most significant bit of the Huffman code is the least
1464 significant bit of the value we read from the bitstream.
1465 That means that to make our table lookups work, we need
1466 to reverse the bits of CODE. Since reversing bits is
1467 tedious and in general requires using a table, we instead
1468 increment CODE in reverse order. That is, if the number
1469 of bits we are currently using, here named J, is 3, we
1470 count as 000, 100, 010, 110, 001, 101, 011, 111, which is
1471 to say the numbers from 0 to 7 but with the bits
1472 reversed. Going to more bits, aka incrementing J,
1473 effectively just adds more zero bits as the beginning,
1474 and as such does not change the numeric value of CODE.
1476 To increment CODE of length J in reverse order, find the
1477 most significant zero bit and set it to one while
1478 clearing all higher bits. In other words, add 1 modulo
1479 2^J, only reversed. */
1481 incr
= 1U << (j
- 1);
1482 while ((code
& incr
) != 0)
1494 /* Handle the values that require a secondary table. */
1496 /* Set FIRSTCODE, the number at which the codes start, for each
1499 for (j
= 9; j
< 16; j
++)
1508 /* There are JCNT values that have this length, the values
1509 starting from START[j]. Those values are assigned
1510 consecutive values starting at CODE. */
1512 firstcode
[j
- 9] = code
;
1514 /* Reverse add JCNT to CODE modulo 2^J. */
1515 for (k
= 0; k
< j
; ++k
)
1517 if ((jcnt
& (1U << k
)) != 0)
1522 bit
= 1U << (j
- k
- 1);
1523 for (m
= 0; m
< j
- k
; ++m
, bit
>>= 1)
1525 if ((code
& bit
) == 0)
1535 if (unlikely (jcnt
!= 0))
1537 elf_uncompress_failed ();
1542 /* For J from 9 to 15, inclusive, we store COUNT[J] consecutive
1543 values starting at START[J] with consecutive codes starting at
1544 FIRSTCODE[J - 9]. In the primary table we need to point to the
1545 secondary table, and the secondary table will be indexed by J - 9
1546 bits. We count down from 15 so that we install the larger
1547 secondary tables first, as the smaller ones may be embedded in
1550 next_secondary
= 0; /* Index of next secondary table (after primary). */
1551 for (j
= 15; j
>= 9; j
--)
1555 size_t primary
; /* Current primary index. */
1556 size_t secondary
; /* Offset to current secondary table. */
1557 size_t secondary_bits
; /* Bit size of current secondary table. */
1564 code
= firstcode
[j
- 9];
1568 for (i
= 0; i
< jcnt
; ++i
)
1574 if ((code
& 0xff) != primary
)
1578 /* Fill in a new primary table entry. */
1580 primary
= code
& 0xff;
1582 tprimary
= table
[primary
];
1585 /* Start a new secondary table. */
1587 if (unlikely ((next_secondary
& ZLIB_HUFFMAN_VALUE_MASK
)
1590 elf_uncompress_failed ();
1594 secondary
= next_secondary
;
1595 secondary_bits
= j
- 8;
1596 next_secondary
+= 1 << secondary_bits
;
1597 table
[primary
] = (secondary
1598 + ((j
- 8) << ZLIB_HUFFMAN_BITS_SHIFT
)
1599 + (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT
));
1603 /* There is an existing entry. It had better be a
1604 secondary table with enough bits. */
1605 if (unlikely ((tprimary
1606 & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT
))
1609 elf_uncompress_failed ();
1612 secondary
= tprimary
& ZLIB_HUFFMAN_VALUE_MASK
;
1613 secondary_bits
= ((tprimary
>> ZLIB_HUFFMAN_BITS_SHIFT
)
1614 & ZLIB_HUFFMAN_BITS_MASK
);
1615 if (unlikely (secondary_bits
< j
- 8))
1617 elf_uncompress_failed ();
1623 /* Fill in secondary table entries. */
1625 tval
= val
| ((j
- 8) << ZLIB_HUFFMAN_BITS_SHIFT
);
1627 for (ind
= code
>> 8;
1628 ind
< (1U << secondary_bits
);
1629 ind
+= 1U << (j
- 8))
1631 if (unlikely (table
[secondary
+ 0x100 + ind
] != 0))
1633 elf_uncompress_failed ();
1636 table
[secondary
+ 0x100 + ind
] = tval
;
1642 incr
= 1U << (j
- 1);
1643 while ((code
& incr
) != 0)
/* When regenerating the fixed tables, expose how much secondary space
   was consumed so the generator can print the right array size.  */
1655 #ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
1656 final_next_secondary
= next_secondary
;
/* Offline generator (normally compiled out): builds the fixed literal and
   distance Huffman tables for DEFLATE block type 1 via
   elf_zlib_inflate_table and prints them as C initializers
   (elf_zlib_default_table / elf_zlib_default_dist_table).
   NOTE(review): the function header and several statements are elided in
   this listing — confirm against upstream before use.  */
1662 #ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
1664 /* Used to generate the fixed Huffman table for block type 1. */
1668 static uint16_t table
[ZLIB_TABLE_SIZE
];
1669 static unsigned char codes
[288];
/* Fixed literal code lengths from RFC 1951 3.2.6 (the loop bodies that
   assign 8/9/7/8 are elided in this listing).  */
1676 for (i
= 0; i
<= 143; ++i
)
1678 for (i
= 144; i
<= 255; ++i
)
1680 for (i
= 256; i
<= 279; ++i
)
1682 for (i
= 280; i
<= 287; ++i
)
1684 if (!elf_zlib_inflate_table (&codes
[0], 288, &table
[0], &table
[0]))
1686 fprintf (stderr
, "elf_zlib_inflate_table failed\n");
1687 exit (EXIT_FAILURE
);
1690 printf ("static const uint16_t elf_zlib_default_table[%#zx] =\n",
1691 final_next_secondary
+ 0x100);
1693 for (i
= 0; i
< final_next_secondary
+ 0x100; i
+= 8)
1698 for (j
= i
; j
< final_next_secondary
+ 0x100 && j
< i
+ 8; ++j
)
1699 printf (" %#x,", table
[j
]);
/* The 32 distance codes all use 5 bits in the fixed tables.  */
1705 for (i
= 0; i
< 32; ++i
)
1707 if (!elf_zlib_inflate_table (&codes
[0], 32, &table
[0], &table
[0]))
1709 fprintf (stderr
, "elf_zlib_inflate_table failed\n");
1710 exit (EXIT_FAILURE
);
1713 printf ("static const uint16_t elf_zlib_default_dist_table[%#zx] =\n",
1714 final_next_secondary
+ 0x100);
1716 for (i
= 0; i
< final_next_secondary
+ 0x100; i
+= 8)
1721 for (j
= i
; j
< final_next_secondary
+ 0x100 && j
< i
+ 8; ++j
)
1722 printf (" %#x,", table
[j
]);
/* Fixed literal/length Huffman table for DEFLATE block type 1, as emitted
   by the generator above.  Entries use the ZLIB_HUFFMAN_* packing: value
   bits 0-8, bit-count bits 9-11, secondary-table flag bit 12.  */
1732 /* The fixed tables generated by the #ifdef'ed out main function
1735 static const uint16_t elf_zlib_default_table
[0x170] =
1737 0xd00, 0xe50, 0xe10, 0xf18, 0xd10, 0xe70, 0xe30, 0x1230,
1738 0xd08, 0xe60, 0xe20, 0x1210, 0xe00, 0xe80, 0xe40, 0x1250,
1739 0xd04, 0xe58, 0xe18, 0x1200, 0xd14, 0xe78, 0xe38, 0x1240,
1740 0xd0c, 0xe68, 0xe28, 0x1220, 0xe08, 0xe88, 0xe48, 0x1260,
1741 0xd02, 0xe54, 0xe14, 0xf1c, 0xd12, 0xe74, 0xe34, 0x1238,
1742 0xd0a, 0xe64, 0xe24, 0x1218, 0xe04, 0xe84, 0xe44, 0x1258,
1743 0xd06, 0xe5c, 0xe1c, 0x1208, 0xd16, 0xe7c, 0xe3c, 0x1248,
1744 0xd0e, 0xe6c, 0xe2c, 0x1228, 0xe0c, 0xe8c, 0xe4c, 0x1268,
1745 0xd01, 0xe52, 0xe12, 0xf1a, 0xd11, 0xe72, 0xe32, 0x1234,
1746 0xd09, 0xe62, 0xe22, 0x1214, 0xe02, 0xe82, 0xe42, 0x1254,
1747 0xd05, 0xe5a, 0xe1a, 0x1204, 0xd15, 0xe7a, 0xe3a, 0x1244,
1748 0xd0d, 0xe6a, 0xe2a, 0x1224, 0xe0a, 0xe8a, 0xe4a, 0x1264,
1749 0xd03, 0xe56, 0xe16, 0xf1e, 0xd13, 0xe76, 0xe36, 0x123c,
1750 0xd0b, 0xe66, 0xe26, 0x121c, 0xe06, 0xe86, 0xe46, 0x125c,
1751 0xd07, 0xe5e, 0xe1e, 0x120c, 0xd17, 0xe7e, 0xe3e, 0x124c,
1752 0xd0f, 0xe6e, 0xe2e, 0x122c, 0xe0e, 0xe8e, 0xe4e, 0x126c,
1753 0xd00, 0xe51, 0xe11, 0xf19, 0xd10, 0xe71, 0xe31, 0x1232,
1754 0xd08, 0xe61, 0xe21, 0x1212, 0xe01, 0xe81, 0xe41, 0x1252,
1755 0xd04, 0xe59, 0xe19, 0x1202, 0xd14, 0xe79, 0xe39, 0x1242,
1756 0xd0c, 0xe69, 0xe29, 0x1222, 0xe09, 0xe89, 0xe49, 0x1262,
1757 0xd02, 0xe55, 0xe15, 0xf1d, 0xd12, 0xe75, 0xe35, 0x123a,
1758 0xd0a, 0xe65, 0xe25, 0x121a, 0xe05, 0xe85, 0xe45, 0x125a,
1759 0xd06, 0xe5d, 0xe1d, 0x120a, 0xd16, 0xe7d, 0xe3d, 0x124a,
1760 0xd0e, 0xe6d, 0xe2d, 0x122a, 0xe0d, 0xe8d, 0xe4d, 0x126a,
1761 0xd01, 0xe53, 0xe13, 0xf1b, 0xd11, 0xe73, 0xe33, 0x1236,
1762 0xd09, 0xe63, 0xe23, 0x1216, 0xe03, 0xe83, 0xe43, 0x1256,
1763 0xd05, 0xe5b, 0xe1b, 0x1206, 0xd15, 0xe7b, 0xe3b, 0x1246,
1764 0xd0d, 0xe6b, 0xe2b, 0x1226, 0xe0b, 0xe8b, 0xe4b, 0x1266,
1765 0xd03, 0xe57, 0xe17, 0xf1f, 0xd13, 0xe77, 0xe37, 0x123e,
1766 0xd0b, 0xe67, 0xe27, 0x121e, 0xe07, 0xe87, 0xe47, 0x125e,
1767 0xd07, 0xe5f, 0xe1f, 0x120e, 0xd17, 0xe7f, 0xe3f, 0x124e,
1768 0xd0f, 0xe6f, 0xe2f, 0x122e, 0xe0f, 0xe8f, 0xe4f, 0x126e,
1769 0x290, 0x291, 0x292, 0x293, 0x294, 0x295, 0x296, 0x297,
1770 0x298, 0x299, 0x29a, 0x29b, 0x29c, 0x29d, 0x29e, 0x29f,
1771 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x2a4, 0x2a5, 0x2a6, 0x2a7,
1772 0x2a8, 0x2a9, 0x2aa, 0x2ab, 0x2ac, 0x2ad, 0x2ae, 0x2af,
1773 0x2b0, 0x2b1, 0x2b2, 0x2b3, 0x2b4, 0x2b5, 0x2b6, 0x2b7,
1774 0x2b8, 0x2b9, 0x2ba, 0x2bb, 0x2bc, 0x2bd, 0x2be, 0x2bf,
1775 0x2c0, 0x2c1, 0x2c2, 0x2c3, 0x2c4, 0x2c5, 0x2c6, 0x2c7,
1776 0x2c8, 0x2c9, 0x2ca, 0x2cb, 0x2cc, 0x2cd, 0x2ce, 0x2cf,
1777 0x2d0, 0x2d1, 0x2d2, 0x2d3, 0x2d4, 0x2d5, 0x2d6, 0x2d7,
1778 0x2d8, 0x2d9, 0x2da, 0x2db, 0x2dc, 0x2dd, 0x2de, 0x2df,
1779 0x2e0, 0x2e1, 0x2e2, 0x2e3, 0x2e4, 0x2e5, 0x2e6, 0x2e7,
1780 0x2e8, 0x2e9, 0x2ea, 0x2eb, 0x2ec, 0x2ed, 0x2ee, 0x2ef,
1781 0x2f0, 0x2f1, 0x2f2, 0x2f3, 0x2f4, 0x2f5, 0x2f6, 0x2f7,
1782 0x2f8, 0x2f9, 0x2fa, 0x2fb, 0x2fc, 0x2fd, 0x2fe, 0x2ff,
/* Fixed distance Huffman table for DEFLATE block type 1; same entry
   packing as elf_zlib_default_table.  All fixed distance codes are 5 bits,
   hence the repeating 32-entry pattern.  */
1785 static const uint16_t elf_zlib_default_dist_table
[0x100] =
1787 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1788 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1789 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1790 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1791 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1792 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1793 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1794 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1795 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1796 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1797 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1798 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1799 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1800 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1801 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1802 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1803 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1804 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1805 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1806 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1807 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1808 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1809 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1810 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1811 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1812 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1813 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1814 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1815 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1816 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1817 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1818 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
/* elf_zlib_inflate: decompress the zlib stream(s) at PIN/SIN into
   POUT/SOUT.  Handles stored (type 0), fixed-Huffman (type 1, via the
   default tables above) and dynamic-Huffman (type 2) DEFLATE blocks, per
   RFC 1950/1951.  Returns 1 on success, 0 on error.  NOTE(review): many
   original lines (braces, switch labels, returns, bit-consumption
   statements) are elided in this extraction; do not compile as-is.  */
1821 /* Inflate a zlib stream from PIN/SIN to POUT/SOUT. Return 1 on
1822 success, 0 on some error parsing the stream. */
1825 elf_zlib_inflate (const unsigned char *pin
, size_t sin
, uint16_t *zdebug_table
,
1826 unsigned char *pout
, size_t sout
)
1828 unsigned char *porigout
;
1829 const unsigned char *pinend
;
1830 unsigned char *poutend
;
1832 /* We can apparently see multiple zlib streams concatenated
1833 together, so keep going as long as there is something to read.
1834 The last 4 bytes are the checksum. */
1837 poutend
= pout
+ sout
;
1838 while ((pinend
- pin
) > 4)
1844 /* Read the two byte zlib header. */
1846 if (unlikely ((pin
[0] & 0xf) != 8)) /* 8 is zlib encoding. */
1848 /* Unknown compression method. */
1849 elf_uncompress_failed ();
1852 if (unlikely ((pin
[0] >> 4) > 7))
1854 /* Window size too large. Other than this check, we don't
1855 care about the window size. */
1856 elf_uncompress_failed ();
1859 if (unlikely ((pin
[1] & 0x20) != 0))
1861 /* Stream expects a predefined dictionary, but we have no
1863 elf_uncompress_failed ();
1866 val
= (pin
[0] << 8) | pin
[1];
1867 if (unlikely (val
% 31 != 0))
1869 /* Header check failure. */
1870 elf_uncompress_failed ();
1875 /* Align PIN to a 32-bit boundary. */
1879 while ((((uintptr_t) pin
) & 3) != 0)
1881 val
|= (uint64_t)*pin
<< bits
;
1886 /* Read blocks until one is marked last. */
1893 const uint16_t *tlit
;
1894 const uint16_t *tdist
;
1896 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
1900 type
= (val
>> 1) & 3;
1904 if (unlikely (type
== 3))
1906 /* Invalid block type. */
1907 elf_uncompress_failed ();
/* Block type 0: stored (uncompressed) data, length repeated
   one's-complemented for verification.  */
1916 /* An uncompressed block. */
1918 /* If we've read ahead more than a byte, back up. */
1927 if (unlikely ((pinend
- pin
) < 4))
1929 /* Missing length. */
1930 elf_uncompress_failed ();
1933 len
= pin
[0] | (pin
[1] << 8);
1934 lenc
= pin
[2] | (pin
[3] << 8);
1937 if (unlikely (len
!= lenc
))
1940 elf_uncompress_failed ();
1943 if (unlikely (len
> (unsigned int) (pinend
- pin
)
1944 || len
> (unsigned int) (poutend
- pout
)))
1946 /* Not enough space in buffers. */
1947 elf_uncompress_failed ();
1950 memcpy (pout
, pin
, len
);
1955 while ((((uintptr_t) pin
) & 3) != 0)
1957 val
|= (uint64_t)*pin
<< bits
;
1962 /* Go around to read the next block. */
/* Block type 1: fixed Huffman codes — use the precomputed
   default tables.  */
1968 tlit
= elf_zlib_default_table
;
1969 tdist
= elf_zlib_default_dist_table
;
/* Block type 2: dynamic Huffman codes — read the code-length
   alphabet, then the literal/length and distance code lengths,
   and build tables into zdebug_table.  */
1976 unsigned char codebits
[19];
1977 unsigned char *plenbase
;
1978 unsigned char *plen
;
1979 unsigned char *plenend
;
1981 /* Read a Huffman encoding table. The various magic
1982 numbers here are from RFC 1951. */
1984 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
1987 nlit
= (val
& 0x1f) + 257;
1989 ndist
= (val
& 0x1f) + 1;
1991 nclen
= (val
& 0xf) + 4;
1994 if (unlikely (nlit
> 286 || ndist
> 30))
1996 /* Values out of range. */
1997 elf_uncompress_failed ();
2001 /* Read and build the table used to compress the
2002 literal, length, and distance codes. */
2004 memset(&codebits
[0], 0, 19);
2006 /* There are always at least 4 elements in the
2009 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2012 codebits
[16] = val
& 7;
2013 codebits
[17] = (val
>> 3) & 7;
2014 codebits
[18] = (val
>> 6) & 7;
2015 codebits
[0] = (val
>> 9) & 7;
/* The remaining code-length code lengths arrive in the scrambled
   order defined by RFC 1951 3.2.7; NCLEN limits how many are
   present (the intervening guards are elided in this listing).  */
2022 codebits
[8] = val
& 7;
2029 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2032 codebits
[7] = val
& 7;
2039 codebits
[9] = val
& 7;
2046 codebits
[6] = val
& 7;
2053 codebits
[10] = val
& 7;
2060 codebits
[5] = val
& 7;
2067 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2070 codebits
[11] = val
& 7;
2077 codebits
[4] = val
& 7;
2084 codebits
[12] = val
& 7;
2091 codebits
[3] = val
& 7;
2098 codebits
[13] = val
& 7;
2105 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2108 codebits
[2] = val
& 7;
2115 codebits
[14] = val
& 7;
2122 codebits
[1] = val
& 7;
2129 codebits
[15] = val
& 7;
2135 if (!elf_zlib_inflate_table (codebits
, 19, zdebug_table
,
2139 /* Read the compressed bit lengths of the literal,
2140 length, and distance codes. We have allocated space
2141 at the end of zdebug_table to hold them. */
2143 plenbase
= (((unsigned char *) zdebug_table
)
2144 + ZLIB_TABLE_CODELEN_OFFSET
);
2146 plenend
= plen
+ nlit
+ ndist
;
2147 while (plen
< plenend
)
2153 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2156 t
= zdebug_table
[val
& 0xff];
2158 /* The compression here uses bit lengths up to 7, so
2159 a secondary table is never necessary. */
2160 if (unlikely ((t
& (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT
))
2163 elf_uncompress_failed ();
2167 b
= (t
>> ZLIB_HUFFMAN_BITS_SHIFT
) & ZLIB_HUFFMAN_BITS_MASK
;
2171 v
= t
& ZLIB_HUFFMAN_VALUE_MASK
;
2179 /* Copy previous entry 3 to 6 times. */
2181 if (unlikely (plen
== plenbase
))
2183 elf_uncompress_failed ();
2187 /* We used up to 7 bits since the last
2188 elf_fetch_bits, so we have at least 8 bits
2191 c
= 3 + (val
& 0x3);
2194 if (unlikely ((unsigned int) (plenend
- plen
) < c
))
2196 elf_uncompress_failed ();
2205 ATTRIBUTE_FALLTHROUGH
;
2208 ATTRIBUTE_FALLTHROUGH
;
2220 /* Store zero 3 to 10 times. */
2222 /* We used up to 7 bits since the last
2223 elf_fetch_bits, so we have at least 8 bits
2226 c
= 3 + (val
& 0x7);
2229 if (unlikely ((unsigned int) (plenend
- plen
) < c
))
2231 elf_uncompress_failed ();
2239 ATTRIBUTE_FALLTHROUGH
;
2242 ATTRIBUTE_FALLTHROUGH
;
2245 ATTRIBUTE_FALLTHROUGH
;
2248 ATTRIBUTE_FALLTHROUGH
;
2251 ATTRIBUTE_FALLTHROUGH
;
2254 ATTRIBUTE_FALLTHROUGH
;
2266 /* Store zero 11 to 138 times. */
2268 /* We used up to 7 bits since the last
2269 elf_fetch_bits, so we have at least 8 bits
2272 c
= 11 + (val
& 0x7f);
2275 if (unlikely ((unsigned int) (plenend
- plen
) < c
))
2277 elf_uncompress_failed ();
2281 memset (plen
, 0, c
);
2286 elf_uncompress_failed ();
2291 /* Make sure that the stop code can appear. */
2294 if (unlikely (plen
[256] == 0))
2296 elf_uncompress_failed ();
2300 /* Build the decompression tables. */
2302 if (!elf_zlib_inflate_table (plen
, nlit
, zdebug_table
,
2305 if (!elf_zlib_inflate_table (plen
+ nlit
, ndist
, zdebug_table
,
2307 + ZLIB_HUFFMAN_TABLE_SIZE
)))
2309 tlit
= zdebug_table
;
2310 tdist
= zdebug_table
+ ZLIB_HUFFMAN_TABLE_SIZE
;
2313 /* Inflate values until the end of the block. This is the
2314 main loop of the inflation code. */
2323 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2326 t
= tlit
[val
& 0xff];
2327 b
= (t
>> ZLIB_HUFFMAN_BITS_SHIFT
) & ZLIB_HUFFMAN_BITS_MASK
;
2328 v
= t
& ZLIB_HUFFMAN_VALUE_MASK
;
2330 if ((t
& (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT
)) == 0)
/* Long code: resolve through the secondary table.  */
2338 t
= tlit
[v
+ 0x100 + ((val
>> 8) & ((1U << b
) - 1))];
2339 b
= (t
>> ZLIB_HUFFMAN_BITS_SHIFT
) & ZLIB_HUFFMAN_BITS_MASK
;
2340 lit
= t
& ZLIB_HUFFMAN_VALUE_MASK
;
2347 if (unlikely (pout
== poutend
))
2349 elf_uncompress_failed ();
2355 /* We will need to write the next byte soon. We ask
2356 for high temporal locality because we will write
2357 to the whole cache line soon. */
2358 __builtin_prefetch (pout
, 1, 3);
2360 else if (lit
== 256)
2362 /* The end of the block. */
2370 /* Convert lit into a length. */
2373 len
= lit
- 257 + 3;
2374 else if (lit
== 285)
2376 else if (unlikely (lit
> 285))
2378 elf_uncompress_failed ();
2385 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2388 /* This is an expression for the table of length
2389 codes in RFC 1951 3.2.5. */
2391 extra
= (lit
>> 2) + 1;
2392 len
= (lit
& 3) << extra
;
2394 len
+= ((1U << (extra
- 1)) - 1) << 3;
2395 len
+= val
& ((1U << extra
) - 1);
2400 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2403 t
= tdist
[val
& 0xff];
2404 b
= (t
>> ZLIB_HUFFMAN_BITS_SHIFT
) & ZLIB_HUFFMAN_BITS_MASK
;
2405 v
= t
& ZLIB_HUFFMAN_VALUE_MASK
;
2407 if ((t
& (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT
)) == 0)
2415 t
= tdist
[v
+ 0x100 + ((val
>> 8) & ((1U << b
) - 1))];
2416 b
= ((t
>> ZLIB_HUFFMAN_BITS_SHIFT
)
2417 & ZLIB_HUFFMAN_BITS_MASK
);
2418 dist
= t
& ZLIB_HUFFMAN_VALUE_MASK
;
2423 /* Convert dist to a distance. */
2427 /* A distance of 1. A common case, meaning
2428 repeat the last character LEN times. */
2430 if (unlikely (pout
== porigout
))
2432 elf_uncompress_failed ();
2436 if (unlikely ((unsigned int) (poutend
- pout
) < len
))
2438 elf_uncompress_failed ();
2442 memset (pout
, pout
[-1], len
);
2445 else if (unlikely (dist
> 29))
2447 elf_uncompress_failed ();
2458 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2461 /* This is an expression for the table of
2462 distance codes in RFC 1951 3.2.5. */
2464 extra
= (dist
>> 1) + 1;
2465 dist
= (dist
& 1) << extra
;
2467 dist
+= ((1U << (extra
- 1)) - 1) << 2;
2468 dist
+= val
& ((1U << extra
) - 1);
2473 /* Go back dist bytes, and copy len bytes from
2476 if (unlikely ((unsigned int) (pout
- porigout
) < dist
))
2478 elf_uncompress_failed ();
2482 if (unlikely ((unsigned int) (poutend
- pout
) < len
))
2484 elf_uncompress_failed ();
2490 memcpy (pout
, pout
- dist
, len
);
/* Overlapping copy: replicate DIST-sized chunks (loop structure
   elided in this listing).  */
2499 copy
= len
< dist
? len
: dist
;
2500 memcpy (pout
, pout
- dist
, copy
);
2511 /* We should have filled the output buffer. */
2512 if (unlikely (pout
!= poutend
))
2514 elf_uncompress_failed ();
/* elf_zlib_verify_checksum: check the big-endian Adler-32 checksum stored
   at CHECKBYTES against the checksum computed over
   UNCOMPRESSED/UNCOMPRESSED_SIZE (s1/s2 running sums; the final compare is
   (s2 << 16) + s1).  Returns 1 on success, 0 on failure.  NOTE(review):
   the unrolled accumulation bodies are elided in this listing.  */
2521 /* Verify the zlib checksum. The checksum is in the 4 bytes at
2522 CHECKBYTES, and the uncompressed data is at UNCOMPRESSED /
2523 UNCOMPRESSED_SIZE. Returns 1 on success, 0 on failure. */
2526 elf_zlib_verify_checksum (const unsigned char *checkbytes
,
2527 const unsigned char *uncompressed
,
2528 size_t uncompressed_size
)
2532 const unsigned char *p
;
2538 for (i
= 0; i
< 4; i
++)
2539 cksum
= (cksum
<< 8) | checkbytes
[i
];
2544 /* Minimize modulo operations. */
2547 hsz
= uncompressed_size
;
2550 for (i
= 0; i
< 5552; i
+= 16)
2552 /* Manually unroll loop 16 times. */
2593 /* Manually unroll loop 16 times. */
2630 for (i
= 0; i
< hsz
; ++i
)
2639 if (unlikely ((s2
<< 16) + s1
!= cksum
))
2641 elf_uncompress_failed ();
/* elf_zlib_inflate_and_verify: convenience wrapper — inflate PIN/SIN into
   POUT/SOUT, then verify the trailing 4-byte checksum (the last 4 bytes of
   the input).  Returns 1 on success, 0 on error.  */
2648 /* Inflate a zlib stream from PIN/SIN to POUT/SOUT, and verify the
2649 checksum. Return 1 on success, 0 on error. */
2652 elf_zlib_inflate_and_verify (const unsigned char *pin
, size_t sin
,
2653 uint16_t *zdebug_table
, unsigned char *pout
,
2656 if (!elf_zlib_inflate (pin
, sin
, zdebug_table
, pout
, sout
))
2658 if (!elf_zlib_verify_checksum (pin
+ sin
- 4, pout
, sout
))
/* Working-memory layout for the zstd decoder below: three FSE baseline
   tables, a Huffman table, and scratch space, all carved out of a single
   ZSTD_TABLE_SIZE arena by the offsets that follow.  */
2663 /* For working memory during zstd decompression, we need
2664 - a literal length FSE table: 512 64-bit values == 4096 bytes
2665 - a match length FSE table: 512 64-bit values == 4096 bytes
2666 - a offset FSE table: 256 64-bit values == 2048 bytes
2667 - a Huffman tree: 2048 uint16_t values == 4096 bytes
2668 - scratch space, one of
2669 - to build an FSE table: 512 uint16_t values == 1024 bytes
2670 - to build a Huffman tree: 512 uint16_t + 256 uint32_t == 2048 bytes
2673 #define ZSTD_TABLE_SIZE \
2674 (2 * 512 * sizeof (struct elf_zstd_fse_baseline_entry) \
2675 + 256 * sizeof (struct elf_zstd_fse_baseline_entry) \
2676 + 2048 * sizeof (uint16_t) \
2677 + 512 * sizeof (uint16_t) + 256 * sizeof (uint32_t))
2679 #define ZSTD_TABLE_LITERAL_FSE_OFFSET (0)
2681 #define ZSTD_TABLE_MATCH_FSE_OFFSET \
2682 (512 * sizeof (struct elf_zstd_fse_baseline_entry))
2684 #define ZSTD_TABLE_OFFSET_FSE_OFFSET \
2685 (ZSTD_TABLE_MATCH_FSE_OFFSET \
2686 + 512 * sizeof (struct elf_zstd_fse_baseline_entry))
2688 #define ZSTD_TABLE_HUFFMAN_OFFSET \
2689 (ZSTD_TABLE_OFFSET_FSE_OFFSET \
2690 + 256 * sizeof (struct elf_zstd_fse_baseline_entry))
2692 #define ZSTD_TABLE_WORK_OFFSET \
2693 (ZSTD_TABLE_HUFFMAN_OFFSET + 2048 * sizeof (uint16_t))
/* A decoded FSE (Finite State Entropy) table entry: symbol plus the state
   transition (bits to read, base to add).  NOTE(review): the bits/base
   member declarations are elided in this listing.  */
2695 /* An entry in a zstd FSE table. */
2697 struct elf_zstd_fse_entry
2699 /* The value that this FSE entry represents. */
2700 unsigned char symbol
;
2701 /* The number of bits to read to determine the next state. */
2703 /* Add the bits to this base to get the next state. */
/* Forward declaration; defined below.  */
2708 elf_zstd_build_fse (const int16_t *, int, uint16_t *, int,
2709 struct elf_zstd_fse_entry
*);
/* elf_zstd_read_fse: parse an FSE table description from the input stream
   (accuracy log, then variable-width normalized probabilities with zero
   run-length encoding) and hand the result to elf_zstd_build_fse.
   NOTE(review): several statements (declarations, bit-consumption lines,
   returns) are elided in this extraction; do not compile as-is.  */
2711 /* Read a zstd FSE table and build the decoding table in *TABLE, updating *PPIN
2712 as it reads. ZDEBUG_TABLE is scratch space; it must be enough for 512
2713 uint16_t values (1024 bytes). MAXIDX is the maximum number of symbols
2714 permitted. *TABLE_BITS is the maximum number of bits for symbols in the
2715 table: the size of *TABLE is at least 1 << *TABLE_BITS. This updates
2716 *TABLE_BITS to the actual number of bits. Returns 1 on success, 0 on
2720 elf_zstd_read_fse (const unsigned char **ppin
, const unsigned char *pinend
,
2721 uint16_t *zdebug_table
, int maxidx
,
2722 struct elf_zstd_fse_entry
*table
, int *table_bits
)
2724 const unsigned char *pin
;
/* Carve the scratch area: normalized counts first, NEXT array after.  */
2738 norm
= (int16_t *) zdebug_table
;
2739 next
= zdebug_table
+ 256;
2741 if (unlikely (pin
+ 3 >= pinend
))
2743 elf_uncompress_failed ();
2747 /* Align PIN to a 32-bit boundary. */
2751 while ((((uintptr_t) pin
) & 3) != 0)
2753 val
|= (uint64_t)*pin
<< bits
;
2758 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2761 accuracy_log
= (val
& 0xf) + 5;
2762 if (accuracy_log
> *table_bits
)
2764 elf_uncompress_failed ();
2767 *table_bits
= accuracy_log
;
2771 /* This code is mostly copied from the reference implementation. */
2773 /* The number of remaining probabilities, plus 1. This sets the number of
2774 bits that need to be read for the next value. */
2775 remaining
= (1 << accuracy_log
) + 1;
2777 /* The current difference between small and large values, which depends on
2778 the number of remaining values. Small values use one less bit. */
2779 threshold
= 1 << accuracy_log
;
2781 /* The number of bits used to compute threshold. */
2782 bits_needed
= accuracy_log
+ 1;
2784 /* The next character value. */
2787 /* Whether the last count was 0. */
2790 while (remaining
> 1 && idx
<= maxidx
)
2795 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2802 /* Previous count was 0, so there is a 2-bit repeat flag. If the
2803 2-bit flag is 0b11, it adds 3 and then there is another repeat
2806 while ((val
& 0xfff) == 0xfff)
2811 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2814 while ((val
& 3) == 3)
2819 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2822 /* We have at least 13 bits here, don't need to fetch. */
2827 if (unlikely (zidx
> maxidx
))
2829 elf_uncompress_failed ();
/* Fill the skipped symbols with probability zero.  */
2833 for (; idx
< zidx
; idx
++)
2840 max
= (2 * threshold
- 1) - remaining
;
2841 if ((val
& (threshold
- 1)) < max
)
2843 /* A small value. */
2844 count
= (int32_t) ((uint32_t) val
& (threshold
- 1));
2845 val
>>= bits_needed
- 1;
2846 bits
-= bits_needed
- 1;
2850 /* A large value. */
2851 count
= (int32_t) ((uint32_t) val
& (2 * threshold
- 1));
2852 if (count
>= (int32_t) threshold
)
2853 count
-= (int32_t) max
;
2854 val
>>= bits_needed
;
2855 bits
-= bits_needed
;
2863 if (unlikely (idx
>= 256))
2865 elf_uncompress_failed ();
2868 norm
[idx
] = (int16_t) count
;
/* Shrink threshold as fewer probabilities remain.  */
2873 while (remaining
< threshold
)
2880 if (unlikely (remaining
!= 1))
2882 elf_uncompress_failed ();
2886 /* If we've read ahead more than a byte, back up. */
2895 for (; idx
<= maxidx
; idx
++)
2898 return elf_zstd_build_fse (norm
, idx
, next
, *table_bits
, table
);
/* elf_zstd_build_fse: spread symbols over the state table (low-probability
   symbols at the top, others by the standard (size/2 + size/8 + 3) step),
   then compute each state's bits/base transition from the per-symbol NEXT
   counters.  NOTE(review): some declarations and the low-probability
   branch details are elided in this extraction.  */
2901 /* Build the FSE decoding table from a list of probabilities. This reads from
2902 NORM of length IDX, uses NEXT as scratch space, and writes to *TABLE, whose
2903 size is TABLE_BITS. */
2906 elf_zstd_build_fse (const int16_t *norm
, int idx
, uint16_t *next
,
2907 int table_bits
, struct elf_zstd_fse_entry
*table
)
2916 table_size
= 1 << table_bits
;
2917 high_threshold
= table_size
- 1;
2918 for (i
= 0; i
< idx
; i
++)
2924 next
[i
] = (uint16_t) n
;
2927 table
[high_threshold
].symbol
= (unsigned char) i
;
/* Spread the remaining symbols over the table with the fixed step.  */
2934 step
= (table_size
>> 1) + (table_size
>> 3) + 3;
2935 mask
= table_size
- 1;
2936 for (i
= 0; i
< idx
; i
++)
2942 for (j
= 0; j
< n
; j
++)
2944 table
[pos
].symbol
= (unsigned char) i
;
2945 pos
= (pos
+ step
) & mask
;
2946 while (unlikely (pos
> high_threshold
))
2947 pos
= (pos
+ step
) & mask
;
2950 if (unlikely (pos
!= 0))
2952 elf_uncompress_failed ();
/* Derive each state's transition from the symbol's running counter.  */
2956 for (i
= 0; i
< table_size
; i
++)
2959 uint16_t next_state
;
2963 sym
= table
[i
].symbol
;
2964 next_state
= next
[sym
];
2967 if (next_state
== 0)
2969 elf_uncompress_failed ();
2972 high_bit
= 31 - __builtin_clz (next_state
);
2974 bits
= table_bits
- high_bit
;
2975 table
[i
].bits
= (unsigned char) bits
;
2976 table
[i
].base
= (uint16_t) ((next_state
<< bits
) - table_size
);
/* Pack a (baseline, basebits) pair into one uint32_t: baseline in the low
   24 bits, basebits in the top 8.  Used by the base tables below.  */
2982 /* Encode the baseline and bits into a single 32-bit value. */
2984 #define ZSTD_ENCODE_BASELINE_BITS(baseline, basebits) \
2985 ((uint32_t)(baseline) | ((uint32_t)(basebits) << 24))
2987 #define ZSTD_DECODE_BASELINE(baseline_basebits) \
2988 ((uint32_t)(baseline_basebits) & 0xffffff)
2990 #define ZSTD_DECODE_BASEBITS(baseline_basebits) \
2991 ((uint32_t)(baseline_basebits) >> 24)
/* Baseline/extra-bits pairs for literal length codes 16..35; codes 0..15
   map directly to themselves with zero extra bits, hence the offset.  */
2993 /* Given a literal length code, we need to read a number of bits and add that
2994 to a baseline. For states 0 to 15 the baseline is the state and the number
2997 #define ZSTD_LITERAL_LENGTH_BASELINE_OFFSET (16)
2999 static const uint32_t elf_zstd_literal_length_base
[] =
3001 ZSTD_ENCODE_BASELINE_BITS(16, 1),
3002 ZSTD_ENCODE_BASELINE_BITS(18, 1),
3003 ZSTD_ENCODE_BASELINE_BITS(20, 1),
3004 ZSTD_ENCODE_BASELINE_BITS(22, 1),
3005 ZSTD_ENCODE_BASELINE_BITS(24, 2),
3006 ZSTD_ENCODE_BASELINE_BITS(28, 2),
3007 ZSTD_ENCODE_BASELINE_BITS(32, 3),
3008 ZSTD_ENCODE_BASELINE_BITS(40, 3),
3009 ZSTD_ENCODE_BASELINE_BITS(48, 4),
3010 ZSTD_ENCODE_BASELINE_BITS(64, 6),
3011 ZSTD_ENCODE_BASELINE_BITS(128, 7),
3012 ZSTD_ENCODE_BASELINE_BITS(256, 8),
3013 ZSTD_ENCODE_BASELINE_BITS(512, 9),
3014 ZSTD_ENCODE_BASELINE_BITS(1024, 10),
3015 ZSTD_ENCODE_BASELINE_BITS(2048, 11),
3016 ZSTD_ENCODE_BASELINE_BITS(4096, 12),
3017 ZSTD_ENCODE_BASELINE_BITS(8192, 13),
3018 ZSTD_ENCODE_BASELINE_BITS(16384, 14),
3019 ZSTD_ENCODE_BASELINE_BITS(32768, 15),
3020 ZSTD_ENCODE_BASELINE_BITS(65536, 16)
/* Baseline/extra-bits pairs for match length codes 32..52; codes 0..31 map
   to state + 3 with zero extra bits, hence the offset.  */
3023 /* The same applies to match length codes. For states 0 to 31 the baseline is
3024 the state + 3 and the number of bits is zero. */
3026 #define ZSTD_MATCH_LENGTH_BASELINE_OFFSET (32)
3028 static const uint32_t elf_zstd_match_length_base
[] =
3030 ZSTD_ENCODE_BASELINE_BITS(35, 1),
3031 ZSTD_ENCODE_BASELINE_BITS(37, 1),
3032 ZSTD_ENCODE_BASELINE_BITS(39, 1),
3033 ZSTD_ENCODE_BASELINE_BITS(41, 1),
3034 ZSTD_ENCODE_BASELINE_BITS(43, 2),
3035 ZSTD_ENCODE_BASELINE_BITS(47, 2),
3036 ZSTD_ENCODE_BASELINE_BITS(51, 3),
3037 ZSTD_ENCODE_BASELINE_BITS(59, 3),
3038 ZSTD_ENCODE_BASELINE_BITS(67, 4),
3039 ZSTD_ENCODE_BASELINE_BITS(83, 4),
3040 ZSTD_ENCODE_BASELINE_BITS(99, 5),
3041 ZSTD_ENCODE_BASELINE_BITS(131, 7),
3042 ZSTD_ENCODE_BASELINE_BITS(259, 8),
3043 ZSTD_ENCODE_BASELINE_BITS(515, 9),
3044 ZSTD_ENCODE_BASELINE_BITS(1027, 10),
3045 ZSTD_ENCODE_BASELINE_BITS(2051, 11),
3046 ZSTD_ENCODE_BASELINE_BITS(4099, 12),
3047 ZSTD_ENCODE_BASELINE_BITS(8195, 13),
3048 ZSTD_ENCODE_BASELINE_BITS(16387, 14),
3049 ZSTD_ENCODE_BASELINE_BITS(32771, 15),
3050 ZSTD_ENCODE_BASELINE_BITS(65539, 16)
/* FSE entry augmented with the symbol's decoded baseline and extra-bit
   count, so the hot loop avoids a second table lookup.  NOTE(review): the
   baseline, bits and base member declarations are elided in this listing.  */
3053 /* An entry in an FSE table used for literal/match/length values. For these we
3054 have to map the symbol to a baseline value, and we have to read zero or more
3055 bits and add that value to the baseline value. Rather than look the values
3056 up in a separate table, we grow the FSE table so that we get better memory
3059 struct elf_zstd_fse_baseline_entry
3061 /* The baseline for the value that this FSE entry represents. */
3063 /* The number of bits to read to add to the baseline. */
3064 unsigned char basebits
;
3065 /* The number of bits to read to determine the next state. */
3067 /* Add the bits to this base to get the next state. */
/* elf_zstd_make_literal_baseline_fse: rewrite a plain FSE table in place as
   a baseline table, walking backward because the two tables share storage.
   Symbols below the offset are their own baseline with zero extra bits;
   higher symbols (up to 35) are looked up in elf_zstd_literal_length_base.
   NOTE(review): some declarations and pointer decrements are elided in
   this extraction.  */
3071 /* Convert the literal length FSE table FSE_TABLE to an FSE baseline table at
3072 BASELINE_TABLE. Note that FSE_TABLE and BASELINE_TABLE will overlap. */
3075 elf_zstd_make_literal_baseline_fse (
3076 const struct elf_zstd_fse_entry
*fse_table
,
3078 struct elf_zstd_fse_baseline_entry
*baseline_table
)
3081 const struct elf_zstd_fse_entry
*pfse
;
3082 struct elf_zstd_fse_baseline_entry
*pbaseline
;
3084 /* Convert backward to avoid overlap. */
3086 count
= 1U << table_bits
;
3087 pfse
= fse_table
+ count
;
3088 pbaseline
= baseline_table
+ count
;
3089 while (pfse
> fse_table
)
3091 unsigned char symbol
;
3097 symbol
= pfse
->symbol
;
3100 if (symbol
< ZSTD_LITERAL_LENGTH_BASELINE_OFFSET
)
3102 pbaseline
->baseline
= (uint32_t)symbol
;
3103 pbaseline
->basebits
= 0;
3110 if (unlikely (symbol
> 35))
3112 elf_uncompress_failed ();
3115 idx
= symbol
- ZSTD_LITERAL_LENGTH_BASELINE_OFFSET
;
3116 basebits
= elf_zstd_literal_length_base
[idx
];
3117 pbaseline
->baseline
= ZSTD_DECODE_BASELINE(basebits
);
3118 pbaseline
->basebits
= ZSTD_DECODE_BASEBITS(basebits
);
3120 pbaseline
->bits
= bits
;
3121 pbaseline
->base
= base
;
3127 /* Convert the offset length FSE table FSE_TABLE to an FSE baseline table at
3128 BASELINE_TABLE. Note that FSE_TABLE and BASELINE_TABLE will overlap. */
3131 elf_zstd_make_offset_baseline_fse (
3132 const struct elf_zstd_fse_entry
*fse_table
,
3134 struct elf_zstd_fse_baseline_entry
*baseline_table
)
3137 const struct elf_zstd_fse_entry
*pfse
;
3138 struct elf_zstd_fse_baseline_entry
*pbaseline
;
3140 /* Convert backward to avoid overlap. */
3142 count
= 1U << table_bits
;
3143 pfse
= fse_table
+ count
;
3144 pbaseline
= baseline_table
+ count
;
3145 while (pfse
> fse_table
)
3147 unsigned char symbol
;
3153 symbol
= pfse
->symbol
;
3156 if (unlikely (symbol
> 31))
3158 elf_uncompress_failed ();
3162 /* The simple way to write this is
3164 pbaseline->baseline = (uint32_t)1 << symbol;
3165 pbaseline->basebits = symbol;
3167 That will give us an offset value that corresponds to the one
3168 described in the RFC. However, for offset values > 3, we have to
3169 subtract 3. And for offset values 1, 2, 3 we use a repeated offset.
3170 The baseline is always a power of 2, and is never 0, so for these low
3171 values we will see one entry that is baseline 1, basebits 0, and one
3172 entry that is baseline 2, basebits 1. All other entries will have
3173 baseline >= 4 and basebits >= 2.
3175 So we can check for RFC offset <= 3 by checking for basebits <= 1.
3176 And that means that we can subtract 3 here and not worry about doing
3177 it in the hot loop. */
3179 pbaseline
->baseline
= (uint32_t)1 << symbol
;
3181 pbaseline
->baseline
-= 3;
3182 pbaseline
->basebits
= symbol
;
3183 pbaseline
->bits
= bits
;
3184 pbaseline
->base
= base
;
3190 /* Convert the match length FSE table FSE_TABLE to an FSE baseline table at
3191 BASELINE_TABLE. Note that FSE_TABLE and BASELINE_TABLE will overlap. */
3194 elf_zstd_make_match_baseline_fse (
3195 const struct elf_zstd_fse_entry
*fse_table
,
3197 struct elf_zstd_fse_baseline_entry
*baseline_table
)
3200 const struct elf_zstd_fse_entry
*pfse
;
3201 struct elf_zstd_fse_baseline_entry
*pbaseline
;
3203 /* Convert backward to avoid overlap. */
3205 count
= 1U << table_bits
;
3206 pfse
= fse_table
+ count
;
3207 pbaseline
= baseline_table
+ count
;
3208 while (pfse
> fse_table
)
3210 unsigned char symbol
;
3216 symbol
= pfse
->symbol
;
3219 if (symbol
< ZSTD_MATCH_LENGTH_BASELINE_OFFSET
)
3221 pbaseline
->baseline
= (uint32_t)symbol
+ 3;
3222 pbaseline
->basebits
= 0;
3229 if (unlikely (symbol
> 52))
3231 elf_uncompress_failed ();
3234 idx
= symbol
- ZSTD_MATCH_LENGTH_BASELINE_OFFSET
;
3235 basebits
= elf_zstd_match_length_base
[idx
];
3236 pbaseline
->baseline
= ZSTD_DECODE_BASELINE(basebits
);
3237 pbaseline
->basebits
= ZSTD_DECODE_BASEBITS(basebits
);
3239 pbaseline
->bits
= bits
;
3240 pbaseline
->base
= base
;
#ifdef BACKTRACE_GENERATE_ZSTD_FSE_TABLES

/* Used to generate the predefined FSE decoding tables for zstd.  This is
   normally compiled out; it emits the elf_zstd_*_table initializers below.
   NOTE(review): reconstructed from a mangled extraction -- verify the exact
   printf framing against the generated tables.  */

#include <stdio.h>

/* These values are straight from RFC 8878.  */

static int16_t lit[36] =
{
   4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
  -1,-1,-1,-1
};

static int16_t match[53] =
{
   1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
  -1,-1,-1,-1,-1
};

static int16_t offset[29] =
{
  1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1
};

static uint16_t next[256];

/* Print one baseline table as a C initializer, three entries per line.  */

static void
print_table (const struct elf_zstd_fse_baseline_entry *table, size_t size)
{
  size_t i;

  printf ("{\n");
  for (i = 0; i < size; i += 3)
    {
      int j;

      printf (" ");
      for (j = 0; j < 3 && i + j < size; ++j)
	printf (" { %u, %d, %d, %d },", table[i + j].baseline,
		table[i + j].basebits, table[i + j].bits,
		table[i + j].base);
      printf ("\n");
    }
  printf ("};\n");
}

int
main ()
{
  struct elf_zstd_fse_entry lit_table[64];
  struct elf_zstd_fse_baseline_entry lit_baseline[64];
  struct elf_zstd_fse_entry match_table[64];
  struct elf_zstd_fse_baseline_entry match_baseline[64];
  struct elf_zstd_fse_entry offset_table[32];
  struct elf_zstd_fse_baseline_entry offset_baseline[32];

  if (!elf_zstd_build_fse (lit, sizeof lit / sizeof lit[0], next,
			   6, lit_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_literal_baseline_fse (lit_table, 6, lit_baseline))
    {
      fprintf (stderr, "elf_zstd_make_literal_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
	  "elf_zstd_lit_table[64] =\n");
  print_table (lit_baseline,
	       sizeof lit_baseline / sizeof lit_baseline[0]);
  printf ("\n");

  if (!elf_zstd_build_fse (match, sizeof match / sizeof match[0], next,
			   6, match_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_match_baseline_fse (match_table, 6, match_baseline))
    {
      fprintf (stderr, "elf_zstd_make_match_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
	  "elf_zstd_match_table[64] =\n");
  print_table (match_baseline,
	       sizeof match_baseline / sizeof match_baseline[0]);
  printf ("\n");

  if (!elf_zstd_build_fse (offset, sizeof offset / sizeof offset[0], next,
			   5, offset_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_offset_baseline_fse (offset_table, 5, offset_baseline))
    {
      fprintf (stderr, "elf_zstd_make_offset_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
	  "elf_zstd_offset_table[32] =\n");
  print_table (offset_baseline,
	       sizeof offset_baseline / sizeof offset_baseline[0]);
  printf ("\n");

  return 0;
}

#endif
3369 /* The fixed tables generated by the #ifdef'ed out main function
3372 static const struct elf_zstd_fse_baseline_entry elf_zstd_lit_table
[64] =
3374 { 0, 0, 4, 0 }, { 0, 0, 4, 16 }, { 1, 0, 5, 32 },
3375 { 3, 0, 5, 0 }, { 4, 0, 5, 0 }, { 6, 0, 5, 0 },
3376 { 7, 0, 5, 0 }, { 9, 0, 5, 0 }, { 10, 0, 5, 0 },
3377 { 12, 0, 5, 0 }, { 14, 0, 6, 0 }, { 16, 1, 5, 0 },
3378 { 20, 1, 5, 0 }, { 22, 1, 5, 0 }, { 28, 2, 5, 0 },
3379 { 32, 3, 5, 0 }, { 48, 4, 5, 0 }, { 64, 6, 5, 32 },
3380 { 128, 7, 5, 0 }, { 256, 8, 6, 0 }, { 1024, 10, 6, 0 },
3381 { 4096, 12, 6, 0 }, { 0, 0, 4, 32 }, { 1, 0, 4, 0 },
3382 { 2, 0, 5, 0 }, { 4, 0, 5, 32 }, { 5, 0, 5, 0 },
3383 { 7, 0, 5, 32 }, { 8, 0, 5, 0 }, { 10, 0, 5, 32 },
3384 { 11, 0, 5, 0 }, { 13, 0, 6, 0 }, { 16, 1, 5, 32 },
3385 { 18, 1, 5, 0 }, { 22, 1, 5, 32 }, { 24, 2, 5, 0 },
3386 { 32, 3, 5, 32 }, { 40, 3, 5, 0 }, { 64, 6, 4, 0 },
3387 { 64, 6, 4, 16 }, { 128, 7, 5, 32 }, { 512, 9, 6, 0 },
3388 { 2048, 11, 6, 0 }, { 0, 0, 4, 48 }, { 1, 0, 4, 16 },
3389 { 2, 0, 5, 32 }, { 3, 0, 5, 32 }, { 5, 0, 5, 32 },
3390 { 6, 0, 5, 32 }, { 8, 0, 5, 32 }, { 9, 0, 5, 32 },
3391 { 11, 0, 5, 32 }, { 12, 0, 5, 32 }, { 15, 0, 6, 0 },
3392 { 18, 1, 5, 32 }, { 20, 1, 5, 32 }, { 24, 2, 5, 32 },
3393 { 28, 2, 5, 32 }, { 40, 3, 5, 32 }, { 48, 4, 5, 32 },
3394 { 65536, 16, 6, 0 }, { 32768, 15, 6, 0 }, { 16384, 14, 6, 0 },
3398 static const struct elf_zstd_fse_baseline_entry elf_zstd_match_table
[64] =
3400 { 3, 0, 6, 0 }, { 4, 0, 4, 0 }, { 5, 0, 5, 32 },
3401 { 6, 0, 5, 0 }, { 8, 0, 5, 0 }, { 9, 0, 5, 0 },
3402 { 11, 0, 5, 0 }, { 13, 0, 6, 0 }, { 16, 0, 6, 0 },
3403 { 19, 0, 6, 0 }, { 22, 0, 6, 0 }, { 25, 0, 6, 0 },
3404 { 28, 0, 6, 0 }, { 31, 0, 6, 0 }, { 34, 0, 6, 0 },
3405 { 37, 1, 6, 0 }, { 41, 1, 6, 0 }, { 47, 2, 6, 0 },
3406 { 59, 3, 6, 0 }, { 83, 4, 6, 0 }, { 131, 7, 6, 0 },
3407 { 515, 9, 6, 0 }, { 4, 0, 4, 16 }, { 5, 0, 4, 0 },
3408 { 6, 0, 5, 32 }, { 7, 0, 5, 0 }, { 9, 0, 5, 32 },
3409 { 10, 0, 5, 0 }, { 12, 0, 6, 0 }, { 15, 0, 6, 0 },
3410 { 18, 0, 6, 0 }, { 21, 0, 6, 0 }, { 24, 0, 6, 0 },
3411 { 27, 0, 6, 0 }, { 30, 0, 6, 0 }, { 33, 0, 6, 0 },
3412 { 35, 1, 6, 0 }, { 39, 1, 6, 0 }, { 43, 2, 6, 0 },
3413 { 51, 3, 6, 0 }, { 67, 4, 6, 0 }, { 99, 5, 6, 0 },
3414 { 259, 8, 6, 0 }, { 4, 0, 4, 32 }, { 4, 0, 4, 48 },
3415 { 5, 0, 4, 16 }, { 7, 0, 5, 32 }, { 8, 0, 5, 32 },
3416 { 10, 0, 5, 32 }, { 11, 0, 5, 32 }, { 14, 0, 6, 0 },
3417 { 17, 0, 6, 0 }, { 20, 0, 6, 0 }, { 23, 0, 6, 0 },
3418 { 26, 0, 6, 0 }, { 29, 0, 6, 0 }, { 32, 0, 6, 0 },
3419 { 65539, 16, 6, 0 }, { 32771, 15, 6, 0 }, { 16387, 14, 6, 0 },
3420 { 8195, 13, 6, 0 }, { 4099, 12, 6, 0 }, { 2051, 11, 6, 0 },
3424 static const struct elf_zstd_fse_baseline_entry elf_zstd_offset_table
[32] =
3426 { 1, 0, 5, 0 }, { 61, 6, 4, 0 }, { 509, 9, 5, 0 },
3427 { 32765, 15, 5, 0 }, { 2097149, 21, 5, 0 }, { 5, 3, 5, 0 },
3428 { 125, 7, 4, 0 }, { 4093, 12, 5, 0 }, { 262141, 18, 5, 0 },
3429 { 8388605, 23, 5, 0 }, { 29, 5, 5, 0 }, { 253, 8, 4, 0 },
3430 { 16381, 14, 5, 0 }, { 1048573, 20, 5, 0 }, { 1, 2, 5, 0 },
3431 { 125, 7, 4, 16 }, { 2045, 11, 5, 0 }, { 131069, 17, 5, 0 },
3432 { 4194301, 22, 5, 0 }, { 13, 4, 5, 0 }, { 253, 8, 4, 16 },
3433 { 8189, 13, 5, 0 }, { 524285, 19, 5, 0 }, { 2, 1, 5, 0 },
3434 { 61, 6, 4, 16 }, { 1021, 10, 5, 0 }, { 65533, 16, 5, 0 },
3435 { 268435453, 28, 5, 0 }, { 134217725, 27, 5, 0 }, { 67108861, 26, 5, 0 },
3436 { 33554429, 25, 5, 0 }, { 16777213, 24, 5, 0 },
3439 /* Read a zstd Huffman table and build the decoding table in *TABLE, reading
3440 and updating *PPIN. This sets *PTABLE_BITS to the number of bits of the
3441 table, such that the table length is 1 << *TABLE_BITS. ZDEBUG_TABLE is
3442 scratch space; it must be enough for 512 uint16_t values + 256 32-bit values
3443 (2048 bytes). Returns 1 on success, 0 on error. */
3446 elf_zstd_read_huff (const unsigned char **ppin
, const unsigned char *pinend
,
3447 uint16_t *zdebug_table
, uint16_t *table
, int *ptable_bits
)
3449 const unsigned char *pin
;
3451 unsigned char *weights
;
3453 uint32_t *weight_mark
;
3455 uint32_t weight_mask
;
3459 if (unlikely (pin
>= pinend
))
3461 elf_uncompress_failed ();
3467 weights
= (unsigned char *) zdebug_table
;
3471 /* Table is compressed using FSE. */
3473 struct elf_zstd_fse_entry
*fse_table
;
3476 const unsigned char *pfse
;
3477 const unsigned char *pback
;
3480 unsigned int state1
, state2
;
3482 /* SCRATCH is used temporarily by elf_zstd_read_fse. It overlaps
3484 scratch
= zdebug_table
;
3485 fse_table
= (struct elf_zstd_fse_entry
*) (scratch
+ 512);
3489 if (!elf_zstd_read_fse (&pfse
, pinend
, scratch
, 255, fse_table
,
3493 if (unlikely (pin
+ hdr
> pinend
))
3495 elf_uncompress_failed ();
3499 /* We no longer need SCRATCH. Start recording weights. We need up to
3500 256 bytes of weights and 64 bytes of rank counts, so it won't overlap
3503 pback
= pin
+ hdr
- 1;
3505 if (!elf_fetch_backward_init (&pback
, pfse
, &val
, &bits
))
3508 bits
-= fse_table_bits
;
3509 state1
= (val
>> bits
) & ((1U << fse_table_bits
) - 1);
3510 bits
-= fse_table_bits
;
3511 state2
= (val
>> bits
) & ((1U << fse_table_bits
) - 1);
3513 /* There are two independent FSE streams, tracked by STATE1 and STATE2.
3514 We decode them alternately. */
3519 struct elf_zstd_fse_entry
*pt
;
3522 pt
= &fse_table
[state1
];
3524 if (unlikely (pin
< pinend
) && bits
< pt
->bits
)
3526 if (unlikely (count
>= 254))
3528 elf_uncompress_failed ();
3531 weights
[count
] = (unsigned char) pt
->symbol
;
3532 weights
[count
+ 1] = (unsigned char) fse_table
[state2
].symbol
;
3537 if (unlikely (pt
->bits
== 0))
3541 if (!elf_fetch_bits_backward (&pback
, pfse
, &val
, &bits
))
3545 v
= (val
>> bits
) & (((uint64_t)1 << pt
->bits
) - 1);
3548 state1
= pt
->base
+ v
;
3550 if (unlikely (count
>= 255))
3552 elf_uncompress_failed ();
3556 weights
[count
] = pt
->symbol
;
3559 pt
= &fse_table
[state2
];
3561 if (unlikely (pin
< pinend
&& bits
< pt
->bits
))
3563 if (unlikely (count
>= 254))
3565 elf_uncompress_failed ();
3568 weights
[count
] = (unsigned char) pt
->symbol
;
3569 weights
[count
+ 1] = (unsigned char) fse_table
[state1
].symbol
;
3574 if (unlikely (pt
->bits
== 0))
3578 if (!elf_fetch_bits_backward (&pback
, pfse
, &val
, &bits
))
3582 v
= (val
>> bits
) & (((uint64_t)1 << pt
->bits
) - 1);
3585 state2
= pt
->base
+ v
;
3587 if (unlikely (count
>= 255))
3589 elf_uncompress_failed ();
3593 weights
[count
] = pt
->symbol
;
3601 /* Table is not compressed. Each weight is 4 bits. */
3604 if (unlikely (pin
+ ((count
+ 1) / 2) >= pinend
))
3606 elf_uncompress_failed ();
3609 for (i
= 0; i
< count
; i
+= 2)
3615 weights
[i
] = b
>> 4;
3616 weights
[i
+ 1] = b
& 0xf;
3620 weight_mark
= (uint32_t *) (weights
+ 256);
3621 memset (weight_mark
, 0, 13 * sizeof (uint32_t));
3623 for (i
= 0; i
< count
; ++i
)
3628 if (unlikely (w
> 12))
3630 elf_uncompress_failed ();
3635 weight_mask
+= 1U << (w
- 1);
3637 if (unlikely (weight_mask
== 0))
3639 elf_uncompress_failed ();
3643 table_bits
= 32 - __builtin_clz (weight_mask
);
3644 if (unlikely (table_bits
> 11))
3646 elf_uncompress_failed ();
3650 /* Work out the last weight value, which is omitted because the weights must
3651 sum to a power of two. */
3656 left
= ((uint32_t)1 << table_bits
) - weight_mask
;
3659 elf_uncompress_failed ();
3662 high_bit
= 31 - __builtin_clz (left
);
3663 if (((uint32_t)1 << high_bit
) != left
)
3665 elf_uncompress_failed ();
3669 if (unlikely (count
>= 256))
3671 elf_uncompress_failed ();
3675 weights
[count
] = high_bit
+ 1;
3677 ++weight_mark
[high_bit
+ 1];
3680 if (weight_mark
[1] < 2 || (weight_mark
[1] & 1) != 0)
3682 elf_uncompress_failed ();
3686 /* Change WEIGHT_MARK from a count of weights to the index of the first
3687 symbol for that weight. We shift the indexes to also store how many we
3688 have seen so far, below. */
3693 for (i
= 0; i
< table_bits
; ++i
)
3698 next
+= weight_mark
[i
+ 1] << i
;
3699 weight_mark
[i
+ 1] = cur
;
3703 for (i
= 0; i
< count
; ++i
)
3705 unsigned char weight
;
3711 weight
= weights
[i
];
3715 length
= 1U << (weight
- 1);
3716 tval
= (i
<< 8) | (table_bits
+ 1 - weight
);
3717 start
= weight_mark
[weight
];
3718 for (j
= 0; j
< length
; ++j
)
3719 table
[start
+ j
] = tval
;
3720 weight_mark
[weight
] += length
;
3724 *ptable_bits
= (int)table_bits
;
/* Read and decompress the literals and store them ending at POUTEND.  This
   works because we are going to use all the literals in the output, so they
   must fit into the output buffer.  HUFFMAN_TABLE, and PHUFFMAN_TABLE_BITS
   store the Huffman table across calls.  SCRATCH is used to read a Huffman
   table.  Store the start of the decompressed literals in *PPLIT.  Update
   *PPIN.  Return 1 on success, 0 on error.
   NOTE(review): reconstructed from a mangled extraction; dropped guard and
   store lines were refilled from the surrounding fragments -- verify against
   the canonical implementation.  */

static int
elf_zstd_read_literals (const unsigned char **ppin,
			const unsigned char *pinend,
			unsigned char *pout,
			unsigned char *poutend,
			uint16_t *scratch,
			uint16_t *huffman_table,
			int *phuffman_table_bits,
			unsigned char **pplit)
{
  const unsigned char *pin;
  unsigned char *plit;
  unsigned char hdr;
  uint32_t regenerated_size;
  uint32_t compressed_size;
  int streams;
  uint32_t total_streams_size;
  unsigned int huffman_table_bits;
  uint64_t huffman_mask;

  pin = *ppin;
  if (unlikely (pin >= pinend))
    {
      elf_uncompress_failed ();
      return 0;
    }
  hdr = *pin;
  ++pin;

  if ((hdr & 3) == 0 || (hdr & 3) == 1)
    {
      int raw;

      /* Raw_Literals_Block or RLE_Literals_Block */

      raw = (hdr & 3) == 0;

      switch ((hdr >> 2) & 3)
	{
	case 0: case 2:
	  regenerated_size = hdr >> 3;
	  break;
	case 1:
	  if (unlikely (pin >= pinend))
	    {
	      elf_uncompress_failed ();
	      return 0;
	    }
	  regenerated_size = (hdr >> 4) + ((uint32_t)(*pin) << 4);
	  ++pin;
	  break;
	case 3:
	  if (unlikely (pin + 1 >= pinend))
	    {
	      elf_uncompress_failed ();
	      return 0;
	    }
	  regenerated_size = ((hdr >> 4)
			      + ((uint32_t)*pin << 4)
			      + ((uint32_t)pin[1] << 12));
	  pin += 2;
	  break;
	default:
	  elf_uncompress_failed ();
	  return 0;
	}

      if (unlikely ((size_t)(poutend - pout) < regenerated_size))
	{
	  elf_uncompress_failed ();
	  return 0;
	}

      plit = poutend - regenerated_size;

      if (raw)
	{
	  if (unlikely (pin + regenerated_size >= pinend))
	    {
	      elf_uncompress_failed ();
	      return 0;
	    }
	  memcpy (plit, pin, regenerated_size);
	  pin += regenerated_size;
	}
      else
	{
	  if (unlikely (pin >= pinend))
	    {
	      elf_uncompress_failed ();
	      return 0;
	    }
	  memset (plit, *pin, regenerated_size);
	  ++pin;
	}

      *ppin = pin;
      *pplit = plit;

      return 1;
    }

  /* Compressed_Literals_Block or Treeless_Literals_Block */

  streams = 4;
  switch ((hdr >> 2) & 3)
    {
    case 0: case 1:
      if (unlikely (pin + 1 >= pinend))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      regenerated_size = (hdr >> 4) | ((uint32_t)(*pin & 0x3f) << 4);
      compressed_size = (uint32_t)*pin >> 6 | ((uint32_t)pin[1] << 2);
      pin += 2;
      streams = ((hdr >> 2) & 3) == 0 ? 1 : 4;
      break;
    case 2:
      if (unlikely (pin + 2 >= pinend))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      regenerated_size = (((uint32_t)hdr >> 4)
			  | ((uint32_t)*pin << 4)
			  | (((uint32_t)pin[1] & 3) << 12));
      compressed_size = (((uint32_t)pin[1] >> 2)
			 | ((uint32_t)pin[2] << 6));
      pin += 3;
      break;
    case 3:
      if (unlikely (pin + 3 >= pinend))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      regenerated_size = (((uint32_t)hdr >> 4)
			  | ((uint32_t)*pin << 4)
			  | (((uint32_t)pin[1] & 0x3f) << 12));
      compressed_size = (((uint32_t)pin[1] >> 6)
			 | ((uint32_t)pin[2] << 2)
			 | ((uint32_t)pin[3] << 10));
      pin += 4;
      break;
    default:
      elf_uncompress_failed ();
      return 0;
    }

  if (unlikely (pin + compressed_size > pinend))
    {
      elf_uncompress_failed ();
      return 0;
    }

  pinend = pin + compressed_size;
  *ppin = pinend;

  if (unlikely ((size_t)(poutend - pout) < regenerated_size))
    {
      elf_uncompress_failed ();
      return 0;
    }

  plit = poutend - regenerated_size;
  *pplit = plit;

  total_streams_size = compressed_size;
  if ((hdr & 3) == 2)
    {
      const unsigned char *ptable;

      /* Compressed_Literals_Block.  Read Huffman tree.  */

      ptable = pin;
      if (!elf_zstd_read_huff (&ptable, pinend, scratch, huffman_table,
			       phuffman_table_bits))
	return 0;

      if (unlikely (total_streams_size < (size_t)(ptable - pin)))
	{
	  elf_uncompress_failed ();
	  return 0;
	}

      total_streams_size -= ptable - pin;
      pin = ptable;
    }
  else
    {
      /* Treeless_Literals_Block.  Reuse previous Huffman tree.  */
      if (unlikely (*phuffman_table_bits == 0))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
    }

  /* Decompress COMPRESSED_SIZE bytes of data at PIN using the huffman table,
     storing REGENERATED_SIZE bytes of decompressed data at PLIT.  */

  huffman_table_bits = (unsigned int)*phuffman_table_bits;
  huffman_mask = ((uint64_t)1 << huffman_table_bits) - 1;

  if (streams == 1)
    {
      const unsigned char *pback;
      const unsigned char *pbackend;
      uint64_t val;
      unsigned int bits;
      uint32_t i;

      pback = pin + total_streams_size - 1;
      pbackend = pin;
      if (!elf_fetch_backward_init (&pback, pbackend, &val, &bits))
	return 0;

      /* This is one of the inner loops of the decompression algorithm, so we
	 put some effort into optimization.  We can't get more than 64 bytes
	 from a single call to elf_fetch_bits_backward, and we can't subtract
	 more than 11 bits at a time.  */

      if (regenerated_size >= 64)
	{
	  unsigned char *plitstart;
	  unsigned char *plitstop;

	  plitstart = plit;
	  plitstop = plit + regenerated_size - 64;
	  while (plit < plitstop)
	    {
	      uint16_t t;

	      if (!elf_fetch_bits_backward (&pback, pbackend, &val, &bits))
		return 0;

	      /* Fast path: decode three symbols at a time while we know
		 enough bits remain (each symbol consumes at most 11).  */
	      while (bits >= 33)
		{
		  t = huffman_table[(val >> (bits - huffman_table_bits))
				    & huffman_mask];
		  *plit = t >> 8;
		  ++plit;
		  bits -= t & 0xff;

		  t = huffman_table[(val >> (bits - huffman_table_bits))
				    & huffman_mask];
		  *plit = t >> 8;
		  ++plit;
		  bits -= t & 0xff;

		  t = huffman_table[(val >> (bits - huffman_table_bits))
				    & huffman_mask];
		  *plit = t >> 8;
		  ++plit;
		  bits -= t & 0xff;
		}

	      while (bits > 11)
		{
		  t = huffman_table[(val >> (bits - huffman_table_bits))
				    & huffman_mask];
		  *plit = t >> 8;
		  ++plit;
		  bits -= t & 0xff;
		}
	    }

	  regenerated_size -= plit - plitstart;
	}

      /* Up to 64 bytes remain; decode them one at a time, coping with a
	 possibly short final refill.  */
      for (i = 0; i < regenerated_size; ++i)
	{
	  uint16_t t;

	  if (!elf_fetch_bits_backward (&pback, pbackend, &val, &bits))
	    return 0;

	  if (unlikely (bits < huffman_table_bits))
	    {
	      t = huffman_table[(val << (huffman_table_bits - bits))
				& huffman_mask];
	      if (unlikely (bits < (t & 0xff)))
		{
		  elf_uncompress_failed ();
		  return 0;
		}
	    }
	  else
	    t = huffman_table[(val >> (bits - huffman_table_bits))
			      & huffman_mask];

	  *plit = t >> 8;
	  ++plit;
	  bits -= t & 0xff;
	}
    }
  else
    {
      uint32_t stream_size1, stream_size2, stream_size3, stream_size4;
      uint32_t tot;
      const unsigned char *pback1, *pback2, *pback3, *pback4;
      const unsigned char *pbackend1, *pbackend2, *pbackend3, *pbackend4;
      uint64_t val1, val2, val3, val4;
      unsigned int bits1, bits2, bits3, bits4;
      unsigned char *plit1, *plit2, *plit3, *plit4;
      uint32_t regenerated_stream_size;
      uint32_t regenerated_stream_size4;
      uint16_t t1, t2, t3, t4;
      uint32_t i;
      uint32_t limit;

      /* Read jump table.  */
      if (unlikely (pin + 5 >= pinend))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      stream_size1 = (uint32_t)*pin | ((uint32_t)pin[1] << 8);
      pin += 2;
      stream_size2 = (uint32_t)*pin | ((uint32_t)pin[1] << 8);
      pin += 2;
      stream_size3 = (uint32_t)*pin | ((uint32_t)pin[1] << 8);
      pin += 2;
      tot = stream_size1 + stream_size2 + stream_size3;
      if (unlikely (tot > total_streams_size - 6))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      stream_size4 = total_streams_size - 6 - tot;

      pback1 = pin + stream_size1 - 1;
      pbackend1 = pin;

      pback2 = pback1 + stream_size2;
      pbackend2 = pback1 + 1;

      pback3 = pback2 + stream_size3;
      pbackend3 = pback2 + 1;

      pback4 = pback3 + stream_size4;
      pbackend4 = pback3 + 1;

      if (!elf_fetch_backward_init (&pback1, pbackend1, &val1, &bits1))
	return 0;
      if (!elf_fetch_backward_init (&pback2, pbackend2, &val2, &bits2))
	return 0;
      if (!elf_fetch_backward_init (&pback3, pbackend3, &val3, &bits3))
	return 0;
      if (!elf_fetch_backward_init (&pback4, pbackend4, &val4, &bits4))
	return 0;

      regenerated_stream_size = (regenerated_size + 3) / 4;

      plit1 = plit;
      plit2 = plit1 + regenerated_stream_size;
      plit3 = plit2 + regenerated_stream_size;
      plit4 = plit3 + regenerated_stream_size;

      regenerated_stream_size4 = regenerated_size - regenerated_stream_size * 3;

      /* We can't get more than 64 literal bytes from a single call to
	 elf_fetch_bits_backward.  The fourth stream can be up to 3 bytes less,
	 so use as the limit.  */

      limit = regenerated_stream_size4 <= 64 ? 0 : regenerated_stream_size4 - 64;
      i = 0;
      while (i < limit)
	{
	  if (!elf_fetch_bits_backward (&pback1, pbackend1, &val1, &bits1))
	    return 0;
	  if (!elf_fetch_bits_backward (&pback2, pbackend2, &val2, &bits2))
	    return 0;
	  if (!elf_fetch_bits_backward (&pback3, pbackend3, &val3, &bits3))
	    return 0;
	  if (!elf_fetch_bits_backward (&pback4, pbackend4, &val4, &bits4))
	    return 0;

	  /* We can't subtract more than 11 bits at a time.  */

	  do
	    {
	      t1 = huffman_table[(val1 >> (bits1 - huffman_table_bits))
				 & huffman_mask];
	      *plit1 = t1 >> 8;
	      ++plit1;
	      bits1 -= t1 & 0xff;

	      t2 = huffman_table[(val2 >> (bits2 - huffman_table_bits))
				 & huffman_mask];
	      *plit2 = t2 >> 8;
	      ++plit2;
	      bits2 -= t2 & 0xff;

	      t3 = huffman_table[(val3 >> (bits3 - huffman_table_bits))
				 & huffman_mask];
	      *plit3 = t3 >> 8;
	      ++plit3;
	      bits3 -= t3 & 0xff;

	      t4 = huffman_table[(val4 >> (bits4 - huffman_table_bits))
				 & huffman_mask];
	      *plit4 = t4 >> 8;
	      ++plit4;
	      bits4 -= t4 & 0xff;

	      ++i;
	    }
	  while (bits1 > 11 && bits2 > 11 && bits3 > 11 && bits4 > 11);
	}

      while (i < regenerated_stream_size)
	{
	  int use4;

	  /* The fourth stream may be shorter than the others.  */
	  use4 = i < regenerated_stream_size4;

	  if (!elf_fetch_bits_backward (&pback1, pbackend1, &val1, &bits1))
	    return 0;
	  if (!elf_fetch_bits_backward (&pback2, pbackend2, &val2, &bits2))
	    return 0;
	  if (!elf_fetch_bits_backward (&pback3, pbackend3, &val3, &bits3))
	    return 0;
	  if (use4)
	    {
	      if (!elf_fetch_bits_backward (&pback4, pbackend4, &val4, &bits4))
		return 0;
	    }

	  if (unlikely (bits1 < huffman_table_bits))
	    {
	      t1 = huffman_table[(val1 << (huffman_table_bits - bits1))
				 & huffman_mask];
	      if (unlikely (bits1 < (t1 & 0xff)))
		{
		  elf_uncompress_failed ();
		  return 0;
		}
	    }
	  else
	    t1 = huffman_table[(val1 >> (bits1 - huffman_table_bits))
			       & huffman_mask];

	  if (unlikely (bits2 < huffman_table_bits))
	    {
	      t2 = huffman_table[(val2 << (huffman_table_bits - bits2))
				 & huffman_mask];
	      if (unlikely (bits2 < (t2 & 0xff)))
		{
		  elf_uncompress_failed ();
		  return 0;
		}
	    }
	  else
	    t2 = huffman_table[(val2 >> (bits2 - huffman_table_bits))
			       & huffman_mask];

	  if (unlikely (bits3 < huffman_table_bits))
	    {
	      t3 = huffman_table[(val3 << (huffman_table_bits - bits3))
				 & huffman_mask];
	      if (unlikely (bits3 < (t3 & 0xff)))
		{
		  elf_uncompress_failed ();
		  return 0;
		}
	    }
	  else
	    t3 = huffman_table[(val3 >> (bits3 - huffman_table_bits))
			       & huffman_mask];

	  if (use4)
	    {
	      if (unlikely (bits4 < huffman_table_bits))
		{
		  t4 = huffman_table[(val4 << (huffman_table_bits - bits4))
				     & huffman_mask];
		  if (unlikely (bits4 < (t4 & 0xff)))
		    {
		      elf_uncompress_failed ();
		      return 0;
		    }
		}
	      else
		t4 = huffman_table[(val4 >> (bits4 - huffman_table_bits))
				   & huffman_mask];
	    }

	  *plit1 = t1 >> 8;
	  ++plit1;
	  bits1 -= t1 & 0xff;

	  *plit2 = t2 >> 8;
	  ++plit2;
	  bits2 -= t2 & 0xff;

	  *plit3 = t3 >> 8;
	  ++plit3;
	  bits3 -= t3 & 0xff;

	  if (use4)
	    {
	      *plit4 = t4 >> 8;
	      ++plit4;
	      bits4 -= t4 & 0xff;
	    }

	  ++i;
	}
    }

  return 1;
}
/* The information used to decompress a sequence code, which can be a literal
   length, an offset, or a match length.  */

struct elf_zstd_seq_decode
{
  /* The FSE baseline table currently in use for this sequence code.  */
  const struct elf_zstd_fse_baseline_entry *table;
  /* Number of bits in TABLE (table length is 1 << TABLE_BITS); -1 when no
     table has been set up yet.  */
  int table_bits;
};
4265 /* Unpack a sequence code compression mode. */
4268 elf_zstd_unpack_seq_decode (int mode
,
4269 const unsigned char **ppin
,
4270 const unsigned char *pinend
,
4271 const struct elf_zstd_fse_baseline_entry
*predef
,
4275 struct elf_zstd_fse_baseline_entry
*table
,
4277 int (*conv
)(const struct elf_zstd_fse_entry
*,
4279 struct elf_zstd_fse_baseline_entry
*),
4280 struct elf_zstd_seq_decode
*decode
)
4285 decode
->table
= predef
;
4286 decode
->table_bits
= predef_bits
;
4291 struct elf_zstd_fse_entry entry
;
4293 if (unlikely (*ppin
>= pinend
))
4295 elf_uncompress_failed ();
4298 entry
.symbol
= **ppin
;
4302 decode
->table_bits
= 0;
4303 if (!conv (&entry
, 0, table
))
4310 struct elf_zstd_fse_entry
*fse_table
;
4312 /* We use the same space for the simple FSE table and the baseline
4314 fse_table
= (struct elf_zstd_fse_entry
*)table
;
4315 decode
->table_bits
= table_bits
;
4316 if (!elf_zstd_read_fse (ppin
, pinend
, scratch
, maxidx
, fse_table
,
4317 &decode
->table_bits
))
4319 if (!conv (fse_table
, decode
->table_bits
, table
))
4321 decode
->table
= table
;
4326 if (unlikely (decode
->table_bits
== -1))
4328 elf_uncompress_failed ();
4334 elf_uncompress_failed ();
4341 /* Decompress a zstd stream from PIN/SIN to POUT/SOUT. Code based on RFC 8878.
4342 Return 1 on success, 0 on error. */
4345 elf_zstd_decompress (const unsigned char *pin
, size_t sin
,
4346 unsigned char *zdebug_table
, unsigned char *pout
,
4349 const unsigned char *pinend
;
4350 unsigned char *poutstart
;
4351 unsigned char *poutend
;
4352 struct elf_zstd_seq_decode literal_decode
;
4353 struct elf_zstd_fse_baseline_entry
*literal_fse_table
;
4354 struct elf_zstd_seq_decode match_decode
;
4355 struct elf_zstd_fse_baseline_entry
*match_fse_table
;
4356 struct elf_zstd_seq_decode offset_decode
;
4357 struct elf_zstd_fse_baseline_entry
*offset_fse_table
;
4358 uint16_t *huffman_table
;
4359 int huffman_table_bits
;
4360 uint32_t repeated_offset1
;
4361 uint32_t repeated_offset2
;
4362 uint32_t repeated_offset3
;
4366 uint64_t content_size
;
4371 poutend
= pout
+ sout
;
4373 literal_decode
.table
= NULL
;
4374 literal_decode
.table_bits
= -1;
4375 literal_fse_table
= ((struct elf_zstd_fse_baseline_entry
*)
4376 (zdebug_table
+ ZSTD_TABLE_LITERAL_FSE_OFFSET
));
4378 match_decode
.table
= NULL
;
4379 match_decode
.table_bits
= -1;
4380 match_fse_table
= ((struct elf_zstd_fse_baseline_entry
*)
4381 (zdebug_table
+ ZSTD_TABLE_MATCH_FSE_OFFSET
));
4383 offset_decode
.table
= NULL
;
4384 offset_decode
.table_bits
= -1;
4385 offset_fse_table
= ((struct elf_zstd_fse_baseline_entry
*)
4386 (zdebug_table
+ ZSTD_TABLE_OFFSET_FSE_OFFSET
));
4387 huffman_table
= ((uint16_t *)
4388 (zdebug_table
+ ZSTD_TABLE_HUFFMAN_OFFSET
));
4389 huffman_table_bits
= 0;
4390 scratch
= ((uint16_t *)
4391 (zdebug_table
+ ZSTD_TABLE_WORK_OFFSET
));
4393 repeated_offset1
= 1;
4394 repeated_offset2
= 4;
4395 repeated_offset3
= 8;
4397 if (unlikely (sin
< 4))
4399 elf_uncompress_failed ();
4403 /* These values are the zstd magic number. */
4404 if (unlikely (pin
[0] != 0x28
4409 elf_uncompress_failed ();
4415 if (unlikely (pin
>= pinend
))
4417 elf_uncompress_failed ();
4423 /* We expect a single frame. */
4424 if (unlikely ((hdr
& (1 << 5)) == 0))
4426 elf_uncompress_failed ();
4429 /* Reserved bit must be zero. */
4430 if (unlikely ((hdr
& (1 << 3)) != 0))
4432 elf_uncompress_failed ();
4435 /* We do not expect a dictionary. */
4436 if (unlikely ((hdr
& 3) != 0))
4438 elf_uncompress_failed ();
4441 has_checksum
= (hdr
& (1 << 2)) != 0;
4445 if (unlikely (pin
>= pinend
))
4447 elf_uncompress_failed ();
4450 content_size
= (uint64_t) *pin
++;
4453 if (unlikely (pin
+ 1 >= pinend
))
4455 elf_uncompress_failed ();
4458 content_size
= (((uint64_t) pin
[0]) | (((uint64_t) pin
[1]) << 8)) + 256;
4462 if (unlikely (pin
+ 3 >= pinend
))
4464 elf_uncompress_failed ();
4467 content_size
= ((uint64_t) pin
[0]
4468 | (((uint64_t) pin
[1]) << 8)
4469 | (((uint64_t) pin
[2]) << 16)
4470 | (((uint64_t) pin
[3]) << 24));
4474 if (unlikely (pin
+ 7 >= pinend
))
4476 elf_uncompress_failed ();
4479 content_size
= ((uint64_t) pin
[0]
4480 | (((uint64_t) pin
[1]) << 8)
4481 | (((uint64_t) pin
[2]) << 16)
4482 | (((uint64_t) pin
[3]) << 24)
4483 | (((uint64_t) pin
[4]) << 32)
4484 | (((uint64_t) pin
[5]) << 40)
4485 | (((uint64_t) pin
[6]) << 48)
4486 | (((uint64_t) pin
[7]) << 56));
4490 elf_uncompress_failed ();
4494 if (unlikely (content_size
!= (size_t) content_size
4495 || (size_t) content_size
!= sout
))
4497 elf_uncompress_failed ();
4506 uint32_t block_size
;
4508 if (unlikely (pin
+ 2 >= pinend
))
4510 elf_uncompress_failed ();
4513 block_hdr
= ((uint32_t) pin
[0]
4514 | (((uint32_t) pin
[1]) << 8)
4515 | (((uint32_t) pin
[2]) << 16));
4518 last_block
= block_hdr
& 1;
4519 block_type
= (block_hdr
>> 1) & 3;
4520 block_size
= block_hdr
>> 3;
4526 if (unlikely ((size_t) block_size
> (size_t) (pinend
- pin
)))
4528 elf_uncompress_failed ();
4531 if (unlikely ((size_t) block_size
> (size_t) (poutend
- pout
)))
4533 elf_uncompress_failed ();
4536 memcpy (pout
, pin
, block_size
);
4543 if (unlikely (pin
>= pinend
))
4545 elf_uncompress_failed ();
4548 if (unlikely ((size_t) block_size
> (size_t) (poutend
- pout
)))
4550 elf_uncompress_failed ();
4553 memset (pout
, *pin
, block_size
);
4560 const unsigned char *pblockend
;
4561 unsigned char *plitstack
;
4562 unsigned char *plit
;
4563 uint32_t literal_count
;
4564 unsigned char seq_hdr
;
4567 const unsigned char *pback
;
4570 unsigned int literal_state
;
4571 unsigned int offset_state
;
4572 unsigned int match_state
;
4574 /* Compressed_Block */
4575 if (unlikely ((size_t) block_size
> (size_t) (pinend
- pin
)))
4577 elf_uncompress_failed ();
4581 pblockend
= pin
+ block_size
;
4583 /* Read the literals into the end of the output space, and leave
4584 PLIT pointing at them. */
4586 if (!elf_zstd_read_literals (&pin
, pblockend
, pout
, poutend
,
4587 scratch
, huffman_table
,
4588 &huffman_table_bits
,
4592 literal_count
= poutend
- plit
;
4597 seq_count
= seq_hdr
;
4598 else if (seq_hdr
< 255)
4600 if (unlikely (pin
>= pinend
))
4602 elf_uncompress_failed ();
4605 seq_count
= ((seq_hdr
- 128) << 8) + *pin
;
4610 if (unlikely (pin
+ 1 >= pinend
))
4612 elf_uncompress_failed ();
4615 seq_count
= *pin
+ (pin
[1] << 8) + 0x7f00;
4621 int (*pfn
)(const struct elf_zstd_fse_entry
*,
4622 int, struct elf_zstd_fse_baseline_entry
*);
4624 if (unlikely (pin
>= pinend
))
4626 elf_uncompress_failed ();
4632 pfn
= elf_zstd_make_literal_baseline_fse
;
4633 if (!elf_zstd_unpack_seq_decode ((seq_hdr
>> 6) & 3,
4635 &elf_zstd_lit_table
[0], 6,
4637 literal_fse_table
, 9, pfn
,
4641 pfn
= elf_zstd_make_offset_baseline_fse
;
4642 if (!elf_zstd_unpack_seq_decode ((seq_hdr
>> 4) & 3,
4644 &elf_zstd_offset_table
[0], 5,
4646 offset_fse_table
, 8, pfn
,
4650 pfn
= elf_zstd_make_match_baseline_fse
;
4651 if (!elf_zstd_unpack_seq_decode ((seq_hdr
>> 2) & 3,
4653 &elf_zstd_match_table
[0], 6,
4655 match_fse_table
, 9, pfn
,
4660 pback
= pblockend
- 1;
4661 if (!elf_fetch_backward_init (&pback
, pin
, &val
, &bits
))
4664 bits
-= literal_decode
.table_bits
;
4665 literal_state
= ((val
>> bits
)
4666 & ((1U << literal_decode
.table_bits
) - 1));
4668 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4670 bits
-= offset_decode
.table_bits
;
4671 offset_state
= ((val
>> bits
)
4672 & ((1U << offset_decode
.table_bits
) - 1));
4674 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4676 bits
-= match_decode
.table_bits
;
4677 match_state
= ((val
>> bits
)
4678 & ((1U << match_decode
.table_bits
) - 1));
4683 const struct elf_zstd_fse_baseline_entry
*pt
;
4684 uint32_t offset_basebits
;
4685 uint32_t offset_baseline
;
4686 uint32_t offset_bits
;
4687 uint32_t offset_base
;
4689 uint32_t match_baseline
;
4690 uint32_t match_bits
;
4691 uint32_t match_base
;
4693 uint32_t literal_baseline
;
4694 uint32_t literal_bits
;
4695 uint32_t literal_base
;
4700 pt
= &offset_decode
.table
[offset_state
];
4701 offset_basebits
= pt
->basebits
;
4702 offset_baseline
= pt
->baseline
;
4703 offset_bits
= pt
->bits
;
4704 offset_base
= pt
->base
;
4706 /* This case can be more than 16 bits, which is all that
4707 elf_fetch_bits_backward promises. */
4708 need
= offset_basebits
;
4710 if (unlikely (need
> 16))
4712 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4715 add
= (val
>> bits
) & ((1U << 16) - 1);
4721 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4724 add
+= (val
>> bits
) & ((1U << need
) - 1);
4727 offset
= offset_baseline
+ add
;
4729 pt
= &match_decode
.table
[match_state
];
4730 need
= pt
->basebits
;
4731 match_baseline
= pt
->baseline
;
4732 match_bits
= pt
->bits
;
4733 match_base
= pt
->base
;
4738 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4741 add
= (val
>> bits
) & ((1U << need
) - 1);
4744 match
= match_baseline
+ add
;
4746 pt
= &literal_decode
.table
[literal_state
];
4747 need
= pt
->basebits
;
4748 literal_baseline
= pt
->baseline
;
4749 literal_bits
= pt
->bits
;
4750 literal_base
= pt
->base
;
4755 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4758 add
= (val
>> bits
) & ((1U << need
) - 1);
4761 literal
= literal_baseline
+ add
;
4763 /* See the comment in elf_zstd_make_offset_baseline_fse. */
4764 if (offset_basebits
> 1)
4766 repeated_offset3
= repeated_offset2
;
4767 repeated_offset2
= repeated_offset1
;
4768 repeated_offset1
= offset
;
4772 if (unlikely (literal
== 0))
4777 offset
= repeated_offset1
;
4780 offset
= repeated_offset2
;
4781 repeated_offset2
= repeated_offset1
;
4782 repeated_offset1
= offset
;
4785 offset
= repeated_offset3
;
4786 repeated_offset3
= repeated_offset2
;
4787 repeated_offset2
= repeated_offset1
;
4788 repeated_offset1
= offset
;
4791 offset
= repeated_offset1
- 1;
4792 repeated_offset3
= repeated_offset2
;
4793 repeated_offset2
= repeated_offset1
;
4794 repeated_offset1
= offset
;
4800 if (seq
< seq_count
)
4804 /* Update the three states. */
4806 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4809 need
= literal_bits
;
4811 v
= (val
>> bits
) & (((uint32_t)1 << need
) - 1);
4813 literal_state
= literal_base
+ v
;
4815 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4820 v
= (val
>> bits
) & (((uint32_t)1 << need
) - 1);
4822 match_state
= match_base
+ v
;
4824 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4829 v
= (val
>> bits
) & (((uint32_t)1 << need
) - 1);
4831 offset_state
= offset_base
+ v
;
4834 /* The next sequence is now in LITERAL, OFFSET, MATCH. */
4836 /* Copy LITERAL bytes from the literals. */
4838 if (unlikely ((size_t)(poutend
- pout
) < literal
))
4840 elf_uncompress_failed ();
4844 if (unlikely (literal_count
< literal
))
4846 elf_uncompress_failed ();
4850 literal_count
-= literal
;
4852 /* Often LITERAL is small, so handle small cases quickly. */
4884 if (unlikely ((size_t)(plit
- pout
) < literal
))
4889 while (literal
> move
)
4891 memcpy (pout
, plit
, move
);
4898 memcpy (pout
, plit
, literal
);
4905 /* Copy MATCH bytes from the decoded output at OFFSET. */
4907 if (unlikely ((size_t)(poutend
- pout
) < match
))
4909 elf_uncompress_failed ();
4913 if (unlikely ((size_t)(pout
- poutstart
) < offset
))
4915 elf_uncompress_failed ();
4919 if (offset
>= match
)
4921 memcpy (pout
, pout
- offset
, match
);
4930 copy
= match
< offset
? match
: offset
;
4931 memcpy (pout
, pout
- offset
, copy
);
4938 if (unlikely (seq
>= seq_count
))
4940 /* Copy remaining literals. */
4941 if (literal_count
> 0 && plit
!= pout
)
4943 if (unlikely ((size_t)(poutend
- pout
)
4946 elf_uncompress_failed ();
4950 if ((size_t)(plit
- pout
) < literal_count
)
4955 while (literal_count
> move
)
4957 memcpy (pout
, plit
, move
);
4960 literal_count
-= move
;
4964 memcpy (pout
, plit
, literal_count
);
4967 pout
+= literal_count
;
4979 elf_uncompress_failed ();
4986 if (unlikely (pin
+ 4 > pinend
))
4988 elf_uncompress_failed ();
4992 /* We don't currently verify the checksum. Currently running GNU ld with
4993 --compress-debug-sections=zstd does not seem to generate a
5001 elf_uncompress_failed ();
5008 #define ZDEBUG_TABLE_SIZE \
5009 (ZLIB_TABLE_SIZE > ZSTD_TABLE_SIZE ? ZLIB_TABLE_SIZE : ZSTD_TABLE_SIZE)
5011 /* Uncompress the old compressed debug format, the one emitted by
5012 --compress-debug-sections=zlib-gnu. The compressed data is in
5013 COMPRESSED / COMPRESSED_SIZE, and the function writes to
5014 *UNCOMPRESSED / *UNCOMPRESSED_SIZE. ZDEBUG_TABLE is work space to
5015 hold Huffman tables. Returns 0 on error, 1 on successful
5016 decompression or if something goes wrong. In general we try to
5017 carry on, by returning 1, even if we can't decompress. */
5020 elf_uncompress_zdebug (struct backtrace_state
*state
,
5021 const unsigned char *compressed
, size_t compressed_size
,
5022 uint16_t *zdebug_table
,
5023 backtrace_error_callback error_callback
, void *data
,
5024 unsigned char **uncompressed
, size_t *uncompressed_size
)
5030 *uncompressed
= NULL
;
5031 *uncompressed_size
= 0;
5033 /* The format starts with the four bytes ZLIB, followed by the 8
5034 byte length of the uncompressed data in big-endian order,
5035 followed by a zlib stream. */
5037 if (compressed_size
< 12 || memcmp (compressed
, "ZLIB", 4) != 0)
5041 for (i
= 0; i
< 8; i
++)
5042 sz
= (sz
<< 8) | compressed
[i
+ 4];
5044 if (*uncompressed
!= NULL
&& *uncompressed_size
>= sz
)
5048 po
= (unsigned char *) backtrace_alloc (state
, sz
, error_callback
, data
);
5053 if (!elf_zlib_inflate_and_verify (compressed
+ 12, compressed_size
- 12,
5054 zdebug_table
, po
, sz
))
5058 *uncompressed_size
= sz
;
5063 /* Uncompress the new compressed debug format, the official standard
5064 ELF approach emitted by --compress-debug-sections=zlib-gabi. The
5065 compressed data is in COMPRESSED / COMPRESSED_SIZE, and the
5066 function writes to *UNCOMPRESSED / *UNCOMPRESSED_SIZE.
5067 ZDEBUG_TABLE is work space as for elf_uncompress_zdebug. Returns 0
5068 on error, 1 on successful decompression or if something goes wrong.
5069 In general we try to carry on, by returning 1, even if we can't
5073 elf_uncompress_chdr (struct backtrace_state
*state
,
5074 const unsigned char *compressed
, size_t compressed_size
,
5075 uint16_t *zdebug_table
,
5076 backtrace_error_callback error_callback
, void *data
,
5077 unsigned char **uncompressed
, size_t *uncompressed_size
)
5079 const b_elf_chdr
*chdr
;
5084 *uncompressed
= NULL
;
5085 *uncompressed_size
= 0;
5087 /* The format starts with an ELF compression header. */
5088 if (compressed_size
< sizeof (b_elf_chdr
))
5091 chdr
= (const b_elf_chdr
*) compressed
;
5095 if (*uncompressed
!= NULL
&& *uncompressed_size
>= chdr
->ch_size
)
5099 alc_len
= chdr
->ch_size
;
5100 alc
= backtrace_alloc (state
, alc_len
, error_callback
, data
);
5103 po
= (unsigned char *) alc
;
5106 switch (chdr
->ch_type
)
5108 case ELFCOMPRESS_ZLIB
:
5109 if (!elf_zlib_inflate_and_verify (compressed
+ sizeof (b_elf_chdr
),
5110 compressed_size
- sizeof (b_elf_chdr
),
5111 zdebug_table
, po
, chdr
->ch_size
))
5115 case ELFCOMPRESS_ZSTD
:
5116 if (!elf_zstd_decompress (compressed
+ sizeof (b_elf_chdr
),
5117 compressed_size
- sizeof (b_elf_chdr
),
5118 (unsigned char *)zdebug_table
, po
,
5124 /* Unsupported compression algorithm. */
5129 *uncompressed_size
= chdr
->ch_size
;
5134 if (alc
!= NULL
&& alc_len
> 0)
5135 backtrace_free (state
, alc
, alc_len
, error_callback
, data
);
5139 /* This function is a hook for testing the zlib support. It is only
5143 backtrace_uncompress_zdebug (struct backtrace_state
*state
,
5144 const unsigned char *compressed
,
5145 size_t compressed_size
,
5146 backtrace_error_callback error_callback
,
5147 void *data
, unsigned char **uncompressed
,
5148 size_t *uncompressed_size
)
5150 uint16_t *zdebug_table
;
5153 zdebug_table
= ((uint16_t *) backtrace_alloc (state
, ZDEBUG_TABLE_SIZE
,
5154 error_callback
, data
));
5155 if (zdebug_table
== NULL
)
5157 ret
= elf_uncompress_zdebug (state
, compressed
, compressed_size
,
5158 zdebug_table
, error_callback
, data
,
5159 uncompressed
, uncompressed_size
);
5160 backtrace_free (state
, zdebug_table
, ZDEBUG_TABLE_SIZE
,
5161 error_callback
, data
);
5165 /* This function is a hook for testing the zstd support. It is only used by
5169 backtrace_uncompress_zstd (struct backtrace_state
*state
,
5170 const unsigned char *compressed
,
5171 size_t compressed_size
,
5172 backtrace_error_callback error_callback
,
5173 void *data
, unsigned char *uncompressed
,
5174 size_t uncompressed_size
)
5176 unsigned char *zdebug_table
;
5179 zdebug_table
= ((unsigned char *) backtrace_alloc (state
, ZDEBUG_TABLE_SIZE
,
5180 error_callback
, data
));
5181 if (zdebug_table
== NULL
)
5183 ret
= elf_zstd_decompress (compressed
, compressed_size
,
5184 zdebug_table
, uncompressed
, uncompressed_size
);
5185 backtrace_free (state
, zdebug_table
, ZDEBUG_TABLE_SIZE
,
5186 error_callback
, data
);
5190 /* Number of LZMA states. */
5191 #define LZMA_STATES (12)
5193 /* Number of LZMA position states. The pb value of the property byte
5194 is the number of bits to include in these states, and the maximum
5195 value of pb is 4. */
5196 #define LZMA_POS_STATES (16)
5198 /* Number of LZMA distance states. These are used match distances
5199 with a short match length: up to 4 bytes. */
5200 #define LZMA_DIST_STATES (4)
5202 /* Number of LZMA distance slots. LZMA uses six bits to encode larger
5203 match lengths, so 1 << 6 possible probabilities. */
5204 #define LZMA_DIST_SLOTS (64)
5206 /* LZMA distances 0 to 3 are encoded directly, larger values use a
5207 probability model. */
5208 #define LZMA_DIST_MODEL_START (4)
5210 /* The LZMA probability model ends at 14. */
5211 #define LZMA_DIST_MODEL_END (14)
5213 /* LZMA distance slots for distances less than 127. */
5214 #define LZMA_FULL_DISTANCES (128)
5216 /* LZMA uses four alignment bits. */
5217 #define LZMA_ALIGN_SIZE (16)
5219 /* LZMA match length is encoded with 4, 5, or 10 bits, some of which
5220 are already known. */
5221 #define LZMA_LEN_LOW_SYMBOLS (8)
5222 #define LZMA_LEN_MID_SYMBOLS (8)
5223 #define LZMA_LEN_HIGH_SYMBOLS (256)
5225 /* LZMA literal encoding. */
5226 #define LZMA_LITERAL_CODERS_MAX (16)
5227 #define LZMA_LITERAL_CODER_SIZE (0x300)
5229 /* LZMA is based on a large set of probabilities, each managed
5230 independently. Each probability is an 11 bit number that we store
5231 in a uint16_t. We use a single large array of probabilities. */
5233 /* Lengths of entries in the LZMA probabilities array. The names used
5234 here are copied from the Linux kernel implementation. */
5236 #define LZMA_PROB_IS_MATCH_LEN (LZMA_STATES * LZMA_POS_STATES)
5237 #define LZMA_PROB_IS_REP_LEN LZMA_STATES
5238 #define LZMA_PROB_IS_REP0_LEN LZMA_STATES
5239 #define LZMA_PROB_IS_REP1_LEN LZMA_STATES
5240 #define LZMA_PROB_IS_REP2_LEN LZMA_STATES
5241 #define LZMA_PROB_IS_REP0_LONG_LEN (LZMA_STATES * LZMA_POS_STATES)
5242 #define LZMA_PROB_DIST_SLOT_LEN (LZMA_DIST_STATES * LZMA_DIST_SLOTS)
5243 #define LZMA_PROB_DIST_SPECIAL_LEN (LZMA_FULL_DISTANCES - LZMA_DIST_MODEL_END)
5244 #define LZMA_PROB_DIST_ALIGN_LEN LZMA_ALIGN_SIZE
5245 #define LZMA_PROB_MATCH_LEN_CHOICE_LEN 1
5246 #define LZMA_PROB_MATCH_LEN_CHOICE2_LEN 1
5247 #define LZMA_PROB_MATCH_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS)
5248 #define LZMA_PROB_MATCH_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS)
5249 #define LZMA_PROB_MATCH_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS
5250 #define LZMA_PROB_REP_LEN_CHOICE_LEN 1
5251 #define LZMA_PROB_REP_LEN_CHOICE2_LEN 1
5252 #define LZMA_PROB_REP_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS)
5253 #define LZMA_PROB_REP_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS)
5254 #define LZMA_PROB_REP_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS
5255 #define LZMA_PROB_LITERAL_LEN \
5256 (LZMA_LITERAL_CODERS_MAX * LZMA_LITERAL_CODER_SIZE)
5258 /* Offsets into the LZMA probabilities array. This is mechanically
5259 generated from the above lengths. */
5261 #define LZMA_PROB_IS_MATCH_OFFSET 0
5262 #define LZMA_PROB_IS_REP_OFFSET \
5263 (LZMA_PROB_IS_MATCH_OFFSET + LZMA_PROB_IS_MATCH_LEN)
5264 #define LZMA_PROB_IS_REP0_OFFSET \
5265 (LZMA_PROB_IS_REP_OFFSET + LZMA_PROB_IS_REP_LEN)
5266 #define LZMA_PROB_IS_REP1_OFFSET \
5267 (LZMA_PROB_IS_REP0_OFFSET + LZMA_PROB_IS_REP0_LEN)
5268 #define LZMA_PROB_IS_REP2_OFFSET \
5269 (LZMA_PROB_IS_REP1_OFFSET + LZMA_PROB_IS_REP1_LEN)
5270 #define LZMA_PROB_IS_REP0_LONG_OFFSET \
5271 (LZMA_PROB_IS_REP2_OFFSET + LZMA_PROB_IS_REP2_LEN)
5272 #define LZMA_PROB_DIST_SLOT_OFFSET \
5273 (LZMA_PROB_IS_REP0_LONG_OFFSET + LZMA_PROB_IS_REP0_LONG_LEN)
5274 #define LZMA_PROB_DIST_SPECIAL_OFFSET \
5275 (LZMA_PROB_DIST_SLOT_OFFSET + LZMA_PROB_DIST_SLOT_LEN)
5276 #define LZMA_PROB_DIST_ALIGN_OFFSET \
5277 (LZMA_PROB_DIST_SPECIAL_OFFSET + LZMA_PROB_DIST_SPECIAL_LEN)
5278 #define LZMA_PROB_MATCH_LEN_CHOICE_OFFSET \
5279 (LZMA_PROB_DIST_ALIGN_OFFSET + LZMA_PROB_DIST_ALIGN_LEN)
5280 #define LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET \
5281 (LZMA_PROB_MATCH_LEN_CHOICE_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE_LEN)
5282 #define LZMA_PROB_MATCH_LEN_LOW_OFFSET \
5283 (LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE2_LEN)
5284 #define LZMA_PROB_MATCH_LEN_MID_OFFSET \
5285 (LZMA_PROB_MATCH_LEN_LOW_OFFSET + LZMA_PROB_MATCH_LEN_LOW_LEN)
5286 #define LZMA_PROB_MATCH_LEN_HIGH_OFFSET \
5287 (LZMA_PROB_MATCH_LEN_MID_OFFSET + LZMA_PROB_MATCH_LEN_MID_LEN)
5288 #define LZMA_PROB_REP_LEN_CHOICE_OFFSET \
5289 (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + LZMA_PROB_MATCH_LEN_HIGH_LEN)
5290 #define LZMA_PROB_REP_LEN_CHOICE2_OFFSET \
5291 (LZMA_PROB_REP_LEN_CHOICE_OFFSET + LZMA_PROB_REP_LEN_CHOICE_LEN)
5292 #define LZMA_PROB_REP_LEN_LOW_OFFSET \
5293 (LZMA_PROB_REP_LEN_CHOICE2_OFFSET + LZMA_PROB_REP_LEN_CHOICE2_LEN)
5294 #define LZMA_PROB_REP_LEN_MID_OFFSET \
5295 (LZMA_PROB_REP_LEN_LOW_OFFSET + LZMA_PROB_REP_LEN_LOW_LEN)
5296 #define LZMA_PROB_REP_LEN_HIGH_OFFSET \
5297 (LZMA_PROB_REP_LEN_MID_OFFSET + LZMA_PROB_REP_LEN_MID_LEN)
5298 #define LZMA_PROB_LITERAL_OFFSET \
5299 (LZMA_PROB_REP_LEN_HIGH_OFFSET + LZMA_PROB_REP_LEN_HIGH_LEN)
5301 #define LZMA_PROB_TOTAL_COUNT \
5302 (LZMA_PROB_LITERAL_OFFSET + LZMA_PROB_LITERAL_LEN)
5304 /* Check that the number of LZMA probabilities is the same as the
5305 Linux kernel implementation. */
5307 #if LZMA_PROB_TOTAL_COUNT != 1846 + (1 << 4) * 0x300
5308 #error Wrong number of LZMA probabilities
5311 /* Expressions for the offset in the LZMA probabilities array of a
5312 specific probability. */
5314 #define LZMA_IS_MATCH(state, pos) \
5315 (LZMA_PROB_IS_MATCH_OFFSET + (state) * LZMA_POS_STATES + (pos))
5316 #define LZMA_IS_REP(state) \
5317 (LZMA_PROB_IS_REP_OFFSET + (state))
5318 #define LZMA_IS_REP0(state) \
5319 (LZMA_PROB_IS_REP0_OFFSET + (state))
5320 #define LZMA_IS_REP1(state) \
5321 (LZMA_PROB_IS_REP1_OFFSET + (state))
5322 #define LZMA_IS_REP2(state) \
5323 (LZMA_PROB_IS_REP2_OFFSET + (state))
5324 #define LZMA_IS_REP0_LONG(state, pos) \
5325 (LZMA_PROB_IS_REP0_LONG_OFFSET + (state) * LZMA_POS_STATES + (pos))
5326 #define LZMA_DIST_SLOT(dist, slot) \
5327 (LZMA_PROB_DIST_SLOT_OFFSET + (dist) * LZMA_DIST_SLOTS + (slot))
5328 #define LZMA_DIST_SPECIAL(dist) \
5329 (LZMA_PROB_DIST_SPECIAL_OFFSET + (dist))
5330 #define LZMA_DIST_ALIGN(dist) \
5331 (LZMA_PROB_DIST_ALIGN_OFFSET + (dist))
5332 #define LZMA_MATCH_LEN_CHOICE \
5333 LZMA_PROB_MATCH_LEN_CHOICE_OFFSET
5334 #define LZMA_MATCH_LEN_CHOICE2 \
5335 LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET
5336 #define LZMA_MATCH_LEN_LOW(pos, sym) \
5337 (LZMA_PROB_MATCH_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym))
5338 #define LZMA_MATCH_LEN_MID(pos, sym) \
5339 (LZMA_PROB_MATCH_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym))
5340 #define LZMA_MATCH_LEN_HIGH(sym) \
5341 (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + (sym))
5342 #define LZMA_REP_LEN_CHOICE \
5343 LZMA_PROB_REP_LEN_CHOICE_OFFSET
5344 #define LZMA_REP_LEN_CHOICE2 \
5345 LZMA_PROB_REP_LEN_CHOICE2_OFFSET
5346 #define LZMA_REP_LEN_LOW(pos, sym) \
5347 (LZMA_PROB_REP_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym))
5348 #define LZMA_REP_LEN_MID(pos, sym) \
5349 (LZMA_PROB_REP_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym))
5350 #define LZMA_REP_LEN_HIGH(sym) \
5351 (LZMA_PROB_REP_LEN_HIGH_OFFSET + (sym))
5352 #define LZMA_LITERAL(code, size) \
5353 (LZMA_PROB_LITERAL_OFFSET + (code) * LZMA_LITERAL_CODER_SIZE + (size))
5355 /* Read an LZMA varint from BUF, reading and updating *POFFSET,
5356 setting *VAL. Returns 0 on error, 1 on success. */
5359 elf_lzma_varint (const unsigned char *compressed
, size_t compressed_size
,
5360 size_t *poffset
, uint64_t *val
)
5372 if (unlikely (off
>= compressed_size
))
5374 elf_uncompress_failed ();
5377 b
= compressed
[off
];
5378 v
|= (b
& 0x7f) << (i
* 7);
5380 if ((b
& 0x80) == 0)
5387 if (unlikely (i
>= 9))
5389 elf_uncompress_failed ();
5395 /* Normalize the LZMA range decoder, pulling in an extra input byte if
5399 elf_lzma_range_normalize (const unsigned char *compressed
,
5400 size_t compressed_size
, size_t *poffset
,
5401 uint32_t *prange
, uint32_t *pcode
)
5403 if (*prange
< (1U << 24))
5405 if (unlikely (*poffset
>= compressed_size
))
5407 /* We assume this will be caught elsewhere. */
5408 elf_uncompress_failed ();
5413 *pcode
+= compressed
[*poffset
];
5418 /* Read and return a single bit from the LZMA stream, reading and
5419 updating *PROB. Each bit comes from the range coder. */
5422 elf_lzma_bit (const unsigned char *compressed
, size_t compressed_size
,
5423 uint16_t *prob
, size_t *poffset
, uint32_t *prange
,
5428 elf_lzma_range_normalize (compressed
, compressed_size
, poffset
,
5430 bound
= (*prange
>> 11) * (uint32_t) *prob
;
5434 *prob
+= ((1U << 11) - *prob
) >> 5;
5441 *prob
-= *prob
>> 5;
5446 /* Read an integer of size BITS from the LZMA stream, most significant
5447 bit first. The bits are predicted using PROBS. */
5450 elf_lzma_integer (const unsigned char *compressed
, size_t compressed_size
,
5451 uint16_t *probs
, uint32_t bits
, size_t *poffset
,
5452 uint32_t *prange
, uint32_t *pcode
)
5458 for (i
= 0; i
< bits
; i
++)
5462 bit
= elf_lzma_bit (compressed
, compressed_size
, probs
+ sym
, poffset
,
5467 return sym
- (1 << bits
);
5470 /* Read an integer of size BITS from the LZMA stream, least
5471 significant bit first. The bits are predicted using PROBS. */
5474 elf_lzma_reverse_integer (const unsigned char *compressed
,
5475 size_t compressed_size
, uint16_t *probs
,
5476 uint32_t bits
, size_t *poffset
, uint32_t *prange
,
5485 for (i
= 0; i
< bits
; i
++)
5489 bit
= elf_lzma_bit (compressed
, compressed_size
, probs
+ sym
, poffset
,
5498 /* Read a length from the LZMA stream. IS_REP picks either LZMA_MATCH
5499 or LZMA_REP probabilities. */
5502 elf_lzma_len (const unsigned char *compressed
, size_t compressed_size
,
5503 uint16_t *probs
, int is_rep
, unsigned int pos_state
,
5504 size_t *poffset
, uint32_t *prange
, uint32_t *pcode
)
5506 uint16_t *probs_choice
;
5507 uint16_t *probs_sym
;
5511 probs_choice
= probs
+ (is_rep
5512 ? LZMA_REP_LEN_CHOICE
5513 : LZMA_MATCH_LEN_CHOICE
);
5514 if (elf_lzma_bit (compressed
, compressed_size
, probs_choice
, poffset
,
5517 probs_choice
= probs
+ (is_rep
5518 ? LZMA_REP_LEN_CHOICE2
5519 : LZMA_MATCH_LEN_CHOICE2
);
5520 if (elf_lzma_bit (compressed
, compressed_size
, probs_choice
,
5521 poffset
, prange
, pcode
))
5523 probs_sym
= probs
+ (is_rep
5524 ? LZMA_REP_LEN_HIGH (0)
5525 : LZMA_MATCH_LEN_HIGH (0));
5531 probs_sym
= probs
+ (is_rep
5532 ? LZMA_REP_LEN_MID (pos_state
, 0)
5533 : LZMA_MATCH_LEN_MID (pos_state
, 0));
5540 probs_sym
= probs
+ (is_rep
5541 ? LZMA_REP_LEN_LOW (pos_state
, 0)
5542 : LZMA_MATCH_LEN_LOW (pos_state
, 0));
5547 len
+= elf_lzma_integer (compressed
, compressed_size
, probs_sym
, bits
,
5548 poffset
, prange
, pcode
);
5552 /* Uncompress one LZMA block from a minidebug file. The compressed
5553 data is at COMPRESSED + *POFFSET. Update *POFFSET. Store the data
5554 into the memory at UNCOMPRESSED, size UNCOMPRESSED_SIZE. CHECK is
5555 the stream flag from the xz header. Return 1 on successful
5559 elf_uncompress_lzma_block (const unsigned char *compressed
,
5560 size_t compressed_size
, unsigned char check
,
5561 uint16_t *probs
, unsigned char *uncompressed
,
5562 size_t uncompressed_size
, size_t *poffset
)
5565 size_t block_header_offset
;
5566 size_t block_header_size
;
5567 unsigned char block_flags
;
5568 uint64_t header_compressed_size
;
5569 uint64_t header_uncompressed_size
;
5570 unsigned char lzma2_properties
;
5571 uint32_t computed_crc
;
5572 uint32_t stream_crc
;
5573 size_t uncompressed_offset
;
5574 size_t dict_start_offset
;
5584 block_header_offset
= off
;
5586 /* Block header size is a single byte. */
5587 if (unlikely (off
>= compressed_size
))
5589 elf_uncompress_failed ();
5592 block_header_size
= (compressed
[off
] + 1) * 4;
5593 if (unlikely (off
+ block_header_size
> compressed_size
))
5595 elf_uncompress_failed ();
5600 block_flags
= compressed
[off
+ 1];
5601 if (unlikely ((block_flags
& 0x3c) != 0))
5603 elf_uncompress_failed ();
5609 /* Optional compressed size. */
5610 header_compressed_size
= 0;
5611 if ((block_flags
& 0x40) != 0)
5614 if (!elf_lzma_varint (compressed
, compressed_size
, poffset
,
5615 &header_compressed_size
))
5620 /* Optional uncompressed size. */
5621 header_uncompressed_size
= 0;
5622 if ((block_flags
& 0x80) != 0)
5625 if (!elf_lzma_varint (compressed
, compressed_size
, poffset
,
5626 &header_uncompressed_size
))
5631 /* The recipe for creating a minidebug file is to run the xz program
5632 with no arguments, so we expect exactly one filter: lzma2. */
5634 if (unlikely ((block_flags
& 0x3) != 0))
5636 elf_uncompress_failed ();
5640 if (unlikely (off
+ 2 >= block_header_offset
+ block_header_size
))
5642 elf_uncompress_failed ();
5646 /* The filter ID for LZMA2 is 0x21. */
5647 if (unlikely (compressed
[off
] != 0x21))
5649 elf_uncompress_failed ();
5654 /* The size of the filter properties for LZMA2 is 1. */
5655 if (unlikely (compressed
[off
] != 1))
5657 elf_uncompress_failed ();
5662 lzma2_properties
= compressed
[off
];
5665 if (unlikely (lzma2_properties
> 40))
5667 elf_uncompress_failed ();
5671 /* The properties describe the dictionary size, but we don't care
5674 /* Block header padding. */
5675 if (unlikely (off
+ 4 > compressed_size
))
5677 elf_uncompress_failed ();
5681 off
= (off
+ 3) &~ (size_t) 3;
5683 if (unlikely (off
+ 4 > compressed_size
))
5685 elf_uncompress_failed ();
5689 /* Block header CRC. */
5690 computed_crc
= elf_crc32 (0, compressed
+ block_header_offset
,
5691 block_header_size
- 4);
5692 stream_crc
= ((uint32_t)compressed
[off
]
5693 | ((uint32_t)compressed
[off
+ 1] << 8)
5694 | ((uint32_t)compressed
[off
+ 2] << 16)
5695 | ((uint32_t)compressed
[off
+ 3] << 24));
5696 if (unlikely (computed_crc
!= stream_crc
))
5698 elf_uncompress_failed ();
5703 /* Read a sequence of LZMA2 packets. */
5705 uncompressed_offset
= 0;
5706 dict_start_offset
= 0;
5711 while (off
< compressed_size
)
5713 unsigned char control
;
5718 control
= compressed
[off
];
5720 if (unlikely (control
== 0))
5722 /* End of packets. */
5726 if (control
== 1 || control
>= 0xe0)
5728 /* Reset dictionary to empty. */
5729 dict_start_offset
= uncompressed_offset
;
5736 /* The only valid values here are 1 or 2. A 1 means to
5737 reset the dictionary (done above). Then we see an
5738 uncompressed chunk. */
5740 if (unlikely (control
> 2))
5742 elf_uncompress_failed ();
5746 /* An uncompressed chunk is a two byte size followed by
5749 if (unlikely (off
+ 2 > compressed_size
))
5751 elf_uncompress_failed ();
5755 chunk_size
= compressed
[off
] << 8;
5756 chunk_size
+= compressed
[off
+ 1];
5761 if (unlikely (off
+ chunk_size
> compressed_size
))
5763 elf_uncompress_failed ();
5766 if (unlikely (uncompressed_offset
+ chunk_size
> uncompressed_size
))
5768 elf_uncompress_failed ();
5772 memcpy (uncompressed
+ uncompressed_offset
, compressed
+ off
,
5774 uncompressed_offset
+= chunk_size
;
5779 size_t uncompressed_chunk_start
;
5780 size_t uncompressed_chunk_size
;
5781 size_t compressed_chunk_size
;
5784 /* An LZMA chunk. This starts with an uncompressed size and
5785 a compressed size. */
5787 if (unlikely (off
+ 4 >= compressed_size
))
5789 elf_uncompress_failed ();
5793 uncompressed_chunk_start
= uncompressed_offset
;
5795 uncompressed_chunk_size
= (control
& 0x1f) << 16;
5796 uncompressed_chunk_size
+= compressed
[off
] << 8;
5797 uncompressed_chunk_size
+= compressed
[off
+ 1];
5798 ++uncompressed_chunk_size
;
5800 compressed_chunk_size
= compressed
[off
+ 2] << 8;
5801 compressed_chunk_size
+= compressed
[off
+ 3];
5802 ++compressed_chunk_size
;
5806 /* Bit 7 (0x80) is set.
5807 Bits 6 and 5 (0x40 and 0x20) are as follows:
5808 0: don't reset anything
5810 2: reset state, read properties
5811 3: reset state, read properties, reset dictionary (done above) */
5813 if (control
>= 0xc0)
5815 unsigned char props
;
5817 /* Bit 6 is set, read properties. */
5819 if (unlikely (off
>= compressed_size
))
5821 elf_uncompress_failed ();
5824 props
= compressed
[off
];
5826 if (unlikely (props
> (4 * 5 + 4) * 9 + 8))
5828 elf_uncompress_failed ();
5832 while (props
>= 9 * 5)
5844 if (unlikely (lc
+ lp
> 4))
5846 elf_uncompress_failed ();
5851 if (control
>= 0xa0)
5855 /* Bit 5 or 6 is set, reset LZMA state. */
5858 memset (&dist
, 0, sizeof dist
);
5859 for (i
= 0; i
< LZMA_PROB_TOTAL_COUNT
; i
++)
5865 /* Read the range code. */
5867 if (unlikely (off
+ 5 > compressed_size
))
5869 elf_uncompress_failed ();
5873 /* The byte at compressed[off] is ignored for some
5876 code
= ((compressed
[off
+ 1] << 24)
5877 + (compressed
[off
+ 2] << 16)
5878 + (compressed
[off
+ 3] << 8)
5879 + compressed
[off
+ 4]);
5882 /* This is the main LZMA decode loop. */
5884 limit
= off
+ compressed_chunk_size
;
5886 while (*poffset
< limit
)
5888 unsigned int pos_state
;
5890 if (unlikely (uncompressed_offset
5891 == (uncompressed_chunk_start
5892 + uncompressed_chunk_size
)))
5894 /* We've decompressed all the expected bytes. */
5898 pos_state
= ((uncompressed_offset
- dict_start_offset
)
5901 if (elf_lzma_bit (compressed
, compressed_size
,
5902 probs
+ LZMA_IS_MATCH (lstate
, pos_state
),
5903 poffset
, &range
, &code
))
5907 if (elf_lzma_bit (compressed
, compressed_size
,
5908 probs
+ LZMA_IS_REP (lstate
),
5909 poffset
, &range
, &code
))
5914 /* Repeated match. */
5917 if (elf_lzma_bit (compressed
, compressed_size
,
5918 probs
+ LZMA_IS_REP0 (lstate
),
5919 poffset
, &range
, &code
))
5921 if (elf_lzma_bit (compressed
, compressed_size
,
5922 probs
+ LZMA_IS_REP1 (lstate
),
5923 poffset
, &range
, &code
))
5925 if (elf_lzma_bit (compressed
, compressed_size
,
5926 probs
+ LZMA_IS_REP2 (lstate
),
5927 poffset
, &range
, &code
))
5929 next_dist
= dist
[3];
5934 next_dist
= dist
[2];
5940 next_dist
= dist
[1];
5944 dist
[0] = next_dist
;
5948 if (!elf_lzma_bit (compressed
, compressed_size
,
5950 + LZMA_IS_REP0_LONG (lstate
,
5952 poffset
, &range
, &code
))
5957 lstate
= short_rep
? 9 : 8;
5964 len
= elf_lzma_len (compressed
, compressed_size
,
5965 probs
, 1, pos_state
, poffset
,
5970 uint32_t dist_state
;
5972 uint16_t *probs_dist
;
5983 len
= elf_lzma_len (compressed
, compressed_size
,
5984 probs
, 0, pos_state
, poffset
,
5988 dist_state
= len
- 2;
5991 probs_dist
= probs
+ LZMA_DIST_SLOT (dist_state
, 0);
5992 dist_slot
= elf_lzma_integer (compressed
,
5997 if (dist_slot
< LZMA_DIST_MODEL_START
)
5998 dist
[0] = dist_slot
;
6003 limit
= (dist_slot
>> 1) - 1;
6004 dist
[0] = 2 + (dist_slot
& 1);
6005 if (dist_slot
< LZMA_DIST_MODEL_END
)
6009 + LZMA_DIST_SPECIAL(dist
[0]
6013 elf_lzma_reverse_integer (compressed
,
6025 for (i
= 0; i
< limit
- 4; i
++)
6029 elf_lzma_range_normalize (compressed
,
6035 mask
= -(code
>> 31);
6036 code
+= range
& mask
;
6041 probs_dist
= probs
+ LZMA_DIST_ALIGN (0);
6043 elf_lzma_reverse_integer (compressed
,
6053 if (unlikely (uncompressed_offset
6054 - dict_start_offset
< dist
[0] + 1))
6056 elf_uncompress_failed ();
6059 if (unlikely (uncompressed_offset
+ len
> uncompressed_size
))
6061 elf_uncompress_failed ();
6067 /* A common case, meaning repeat the last
6068 character LEN times. */
6069 memset (uncompressed
+ uncompressed_offset
,
6070 uncompressed
[uncompressed_offset
- 1],
6072 uncompressed_offset
+= len
;
6074 else if (dist
[0] + 1 >= len
)
6076 memcpy (uncompressed
+ uncompressed_offset
,
6077 uncompressed
+ uncompressed_offset
- dist
[0] - 1,
6079 uncompressed_offset
+= len
;
6087 copy
= len
< dist
[0] + 1 ? len
: dist
[0] + 1;
6088 memcpy (uncompressed
+ uncompressed_offset
,
6089 (uncompressed
+ uncompressed_offset
6093 uncompressed_offset
+= copy
;
6102 uint16_t *lit_probs
;
6105 /* Literal value. */
6107 if (uncompressed_offset
> 0)
6108 prev
= uncompressed
[uncompressed_offset
- 1];
6111 low
= prev
>> (8 - lc
);
6112 high
= (((uncompressed_offset
- dict_start_offset
)
6115 lit_probs
= probs
+ LZMA_LITERAL (low
+ high
, 0);
6117 sym
= elf_lzma_integer (compressed
, compressed_size
,
6118 lit_probs
, 8, poffset
, &range
,
6124 unsigned int match_bit
;
6128 if (uncompressed_offset
>= dist
[0] + 1)
6129 match
= uncompressed
[uncompressed_offset
- dist
[0] - 1];
6136 match_bit
= match
& bit
;
6138 idx
= bit
+ match_bit
+ sym
;
6140 if (elf_lzma_bit (compressed
, compressed_size
,
6141 lit_probs
+ idx
, poffset
,
6152 while (sym
< 0x100);
6155 if (unlikely (uncompressed_offset
>= uncompressed_size
))
6157 elf_uncompress_failed ();
6161 uncompressed
[uncompressed_offset
] = (unsigned char) sym
;
6162 ++uncompressed_offset
;
6165 else if (lstate
<= 9)
6172 elf_lzma_range_normalize (compressed
, compressed_size
, poffset
,
6179 /* We have reached the end of the block. Pad to four byte
6181 off
= (off
+ 3) &~ (size_t) 3;
6182 if (unlikely (off
> compressed_size
))
6184 elf_uncompress_failed ();
6196 if (unlikely (off
+ 4 > compressed_size
))
6198 elf_uncompress_failed ();
6201 computed_crc
= elf_crc32 (0, uncompressed
, uncompressed_offset
);
6202 stream_crc
= (compressed
[off
]
6203 | (compressed
[off
+ 1] << 8)
6204 | (compressed
[off
+ 2] << 16)
6205 | (compressed
[off
+ 3] << 24));
6206 if (computed_crc
!= stream_crc
)
6208 elf_uncompress_failed ();
6215 /* CRC64. We don't bother computing a CRC64 checksum. */
6216 if (unlikely (off
+ 8 > compressed_size
))
6218 elf_uncompress_failed ();
6225 /* SHA. We don't bother computing a SHA checksum. */
6226 if (unlikely (off
+ 32 > compressed_size
))
6228 elf_uncompress_failed ();
6235 elf_uncompress_failed ();
6244 /* Uncompress LZMA data found in a minidebug file. The minidebug
6245 format is described at
6246 https://sourceware.org/gdb/current/onlinedocs/gdb/MiniDebugInfo.html.
6247 Returns 0 on error, 1 on successful decompression. For this
6248 function we return 0 on failure to decompress, as the calling code
6249 will carry on in that case. */
6252 elf_uncompress_lzma (struct backtrace_state
*state
,
6253 const unsigned char *compressed
, size_t compressed_size
,
6254 backtrace_error_callback error_callback
, void *data
,
6255 unsigned char **uncompressed
, size_t *uncompressed_size
)
6259 unsigned char check
;
6260 uint32_t computed_crc
;
6261 uint32_t stream_crc
;
6264 size_t footer_offset
;
6265 size_t index_offset
;
6266 uint64_t index_compressed_size
;
6267 uint64_t index_uncompressed_size
;
6270 size_t compressed_block_size
;
6272 /* The format starts with a stream header and ends with a stream
6276 if (unlikely (compressed_size
< header_size
+ footer_size
))
6278 elf_uncompress_failed ();
6282 /* The stream header starts with a magic string. */
6283 if (unlikely (memcmp (compressed
, "\375" "7zXZ\0", 6) != 0))
6285 elf_uncompress_failed ();
6289 /* Next come stream flags. The first byte is zero, the second byte
6291 if (unlikely (compressed
[6] != 0))
6293 elf_uncompress_failed ();
6296 check
= compressed
[7];
6297 if (unlikely ((check
& 0xf8) != 0))
6299 elf_uncompress_failed ();
6303 /* Next comes a CRC of the stream flags. */
6304 computed_crc
= elf_crc32 (0, compressed
+ 6, 2);
6305 stream_crc
= ((uint32_t)compressed
[8]
6306 | ((uint32_t)compressed
[9] << 8)
6307 | ((uint32_t)compressed
[10] << 16)
6308 | ((uint32_t)compressed
[11] << 24));
6309 if (unlikely (computed_crc
!= stream_crc
))
6311 elf_uncompress_failed ();
6315 /* Now that we've parsed the header, parse the footer, so that we
6316 can get the uncompressed size. */
6318 /* The footer ends with two magic bytes. */
6320 offset
= compressed_size
;
6321 if (unlikely (memcmp (compressed
+ offset
- 2, "YZ", 2) != 0))
6323 elf_uncompress_failed ();
6328 /* Before that are the stream flags, which should be the same as the
6329 flags in the header. */
6330 if (unlikely (compressed
[offset
- 2] != 0
6331 || compressed
[offset
- 1] != check
))
6333 elf_uncompress_failed ();
6338 /* Before that is the size of the index field, which precedes the
6340 index_size
= (compressed
[offset
- 4]
6341 | (compressed
[offset
- 3] << 8)
6342 | (compressed
[offset
- 2] << 16)
6343 | (compressed
[offset
- 1] << 24));
6344 index_size
= (index_size
+ 1) * 4;
6347 /* Before that is a footer CRC. */
6348 computed_crc
= elf_crc32 (0, compressed
+ offset
, 6);
6349 stream_crc
= ((uint32_t)compressed
[offset
- 4]
6350 | ((uint32_t)compressed
[offset
- 3] << 8)
6351 | ((uint32_t)compressed
[offset
- 2] << 16)
6352 | ((uint32_t)compressed
[offset
- 1] << 24));
6353 if (unlikely (computed_crc
!= stream_crc
))
6355 elf_uncompress_failed ();
6360 /* The index comes just before the footer. */
6361 if (unlikely (offset
< index_size
+ header_size
))
6363 elf_uncompress_failed ();
6367 footer_offset
= offset
;
6368 offset
-= index_size
;
6369 index_offset
= offset
;
6371 /* The index starts with a zero byte. */
6372 if (unlikely (compressed
[offset
] != 0))
6374 elf_uncompress_failed ();
6379 /* Next is the number of blocks. We expect zero blocks for an empty
6380 stream, and otherwise a single block. */
6381 if (unlikely (compressed
[offset
] == 0))
6383 *uncompressed
= NULL
;
6384 *uncompressed_size
= 0;
6387 if (unlikely (compressed
[offset
] != 1))
6389 elf_uncompress_failed ();
6394 /* Next is the compressed size and the uncompressed size. */
6395 if (!elf_lzma_varint (compressed
, compressed_size
, &offset
,
6396 &index_compressed_size
))
6398 if (!elf_lzma_varint (compressed
, compressed_size
, &offset
,
6399 &index_uncompressed_size
))
6402 /* Pad to a four byte boundary. */
6403 offset
= (offset
+ 3) &~ (size_t) 3;
6405 /* Next is a CRC of the index. */
6406 computed_crc
= elf_crc32 (0, compressed
+ index_offset
,
6407 offset
- index_offset
);
6408 stream_crc
= ((uint32_t)compressed
[offset
]
6409 | ((uint32_t)compressed
[offset
+ 1] << 8)
6410 | ((uint32_t)compressed
[offset
+ 2] << 16)
6411 | ((uint32_t)compressed
[offset
+ 3] << 24));
6412 if (unlikely (computed_crc
!= stream_crc
))
6414 elf_uncompress_failed ();
6419 /* We should now be back at the footer. */
6420 if (unlikely (offset
!= footer_offset
))
6422 elf_uncompress_failed ();
6426 /* Allocate space to hold the uncompressed data. If we succeed in
6427 uncompressing the LZMA data, we never free this memory. */
6428 mem
= (unsigned char *) backtrace_alloc (state
, index_uncompressed_size
,
6429 error_callback
, data
);
6430 if (unlikely (mem
== NULL
))
6432 *uncompressed
= mem
;
6433 *uncompressed_size
= index_uncompressed_size
;
6435 /* Allocate space for probabilities. */
6436 probs
= ((uint16_t *)
6437 backtrace_alloc (state
,
6438 LZMA_PROB_TOTAL_COUNT
* sizeof (uint16_t),
6439 error_callback
, data
));
6440 if (unlikely (probs
== NULL
))
6442 backtrace_free (state
, mem
, index_uncompressed_size
, error_callback
,
6447 /* Uncompress the block, which follows the header. */
6449 if (!elf_uncompress_lzma_block (compressed
, compressed_size
, check
, probs
,
6450 mem
, index_uncompressed_size
, &offset
))
6452 backtrace_free (state
, mem
, index_uncompressed_size
, error_callback
,
6457 compressed_block_size
= offset
- 12;
6458 if (unlikely (compressed_block_size
6459 != ((index_compressed_size
+ 3) &~ (size_t) 3)))
6461 elf_uncompress_failed ();
6462 backtrace_free (state
, mem
, index_uncompressed_size
, error_callback
,
6467 offset
= (offset
+ 3) &~ (size_t) 3;
6468 if (unlikely (offset
!= index_offset
))
6470 elf_uncompress_failed ();
6471 backtrace_free (state
, mem
, index_uncompressed_size
, error_callback
,
6479 /* This function is a hook for testing the LZMA support. It is only
6483 backtrace_uncompress_lzma (struct backtrace_state
*state
,
6484 const unsigned char *compressed
,
6485 size_t compressed_size
,
6486 backtrace_error_callback error_callback
,
6487 void *data
, unsigned char **uncompressed
,
6488 size_t *uncompressed_size
)
6490 return elf_uncompress_lzma (state
, compressed
, compressed_size
,
6491 error_callback
, data
, uncompressed
,
6495 /* Add the backtrace data for one ELF file. Returns 1 on success,
6496 0 on failure (in both cases descriptor is closed) or -1 if exe
6497 is non-zero and the ELF file is ET_DYN, which tells the caller that
6498 elf_add will need to be called on the descriptor again after
6499 base_address is determined. */
6502 elf_add (struct backtrace_state
*state
, const char *filename
, int descriptor
,
6503 const unsigned char *memory
, size_t memory_size
,
6504 uintptr_t base_address
, backtrace_error_callback error_callback
,
6505 void *data
, fileline
*fileline_fn
, int *found_sym
, int *found_dwarf
,
6506 struct dwarf_data
**fileline_entry
, int exe
, int debuginfo
,
6507 const char *with_buildid_data
, uint32_t with_buildid_size
)
6509 struct elf_view ehdr_view
;
6513 unsigned int shstrndx
;
6514 struct elf_view shdrs_view
;
6515 int shdrs_view_valid
;
6516 const b_elf_shdr
*shdrs
;
6517 const b_elf_shdr
*shstrhdr
;
6520 struct elf_view names_view
;
6521 int names_view_valid
;
6523 unsigned int symtab_shndx
;
6524 unsigned int dynsym_shndx
;
6526 struct debug_section_info sections
[DEBUG_MAX
];
6527 struct debug_section_info zsections
[DEBUG_MAX
];
6528 struct elf_view symtab_view
;
6529 int symtab_view_valid
;
6530 struct elf_view strtab_view
;
6531 int strtab_view_valid
;
6532 struct elf_view buildid_view
;
6533 int buildid_view_valid
;
6534 const char *buildid_data
;
6535 uint32_t buildid_size
;
6536 struct elf_view debuglink_view
;
6537 int debuglink_view_valid
;
6538 const char *debuglink_name
;
6539 uint32_t debuglink_crc
;
6540 struct elf_view debugaltlink_view
;
6541 int debugaltlink_view_valid
;
6542 const char *debugaltlink_name
;
6543 const char *debugaltlink_buildid_data
;
6544 uint32_t debugaltlink_buildid_size
;
6545 struct elf_view gnu_debugdata_view
;
6546 int gnu_debugdata_view_valid
;
6547 size_t gnu_debugdata_size
;
6548 unsigned char *gnu_debugdata_uncompressed
;
6549 size_t gnu_debugdata_uncompressed_size
;
6553 struct elf_view debug_view
;
6554 int debug_view_valid
;
6555 unsigned int using_debug_view
;
6556 uint16_t *zdebug_table
;
6557 struct elf_view split_debug_view
[DEBUG_MAX
];
6558 unsigned char split_debug_view_valid
[DEBUG_MAX
];
6559 struct elf_ppc64_opd_data opd_data
, *opd
;
6560 struct dwarf_sections dwarf_sections
;
6568 shdrs_view_valid
= 0;
6569 names_view_valid
= 0;
6570 symtab_view_valid
= 0;
6571 strtab_view_valid
= 0;
6572 buildid_view_valid
= 0;
6573 buildid_data
= NULL
;
6575 debuglink_view_valid
= 0;
6576 debuglink_name
= NULL
;
6578 debugaltlink_view_valid
= 0;
6579 debugaltlink_name
= NULL
;
6580 debugaltlink_buildid_data
= NULL
;
6581 debugaltlink_buildid_size
= 0;
6582 gnu_debugdata_view_valid
= 0;
6583 gnu_debugdata_size
= 0;
6584 debug_view_valid
= 0;
6585 memset (&split_debug_view_valid
[0], 0, sizeof split_debug_view_valid
);
6588 if (!elf_get_view (state
, descriptor
, memory
, memory_size
, 0, sizeof ehdr
,
6589 error_callback
, data
, &ehdr_view
))
6592 memcpy (&ehdr
, ehdr_view
.view
.data
, sizeof ehdr
);
6594 elf_release_view (state
, &ehdr_view
, error_callback
, data
);
6596 if (ehdr
.e_ident
[EI_MAG0
] != ELFMAG0
6597 || ehdr
.e_ident
[EI_MAG1
] != ELFMAG1
6598 || ehdr
.e_ident
[EI_MAG2
] != ELFMAG2
6599 || ehdr
.e_ident
[EI_MAG3
] != ELFMAG3
)
6601 error_callback (data
, "executable file is not ELF", 0);
6604 if (ehdr
.e_ident
[EI_VERSION
] != EV_CURRENT
)
6606 error_callback (data
, "executable file is unrecognized ELF version", 0);
6610 #if BACKTRACE_ELF_SIZE == 32
6611 #define BACKTRACE_ELFCLASS ELFCLASS32
6613 #define BACKTRACE_ELFCLASS ELFCLASS64
6616 if (ehdr
.e_ident
[EI_CLASS
] != BACKTRACE_ELFCLASS
)
6618 error_callback (data
, "executable file is unexpected ELF class", 0);
6622 if (ehdr
.e_ident
[EI_DATA
] != ELFDATA2LSB
6623 && ehdr
.e_ident
[EI_DATA
] != ELFDATA2MSB
)
6625 error_callback (data
, "executable file has unknown endianness", 0);
6629 /* If the executable is ET_DYN, it is either a PIE, or we are running
6630 directly a shared library with .interp. We need to wait for
6631 dl_iterate_phdr in that case to determine the actual base_address. */
6632 if (exe
&& ehdr
.e_type
== ET_DYN
)
6635 shoff
= ehdr
.e_shoff
;
6636 shnum
= ehdr
.e_shnum
;
6637 shstrndx
= ehdr
.e_shstrndx
;
6639 if ((shnum
== 0 || shstrndx
== SHN_XINDEX
)
6642 struct elf_view shdr_view
;
6643 const b_elf_shdr
*shdr
;
6645 if (!elf_get_view (state
, descriptor
, memory
, memory_size
, shoff
,
6646 sizeof shdr
, error_callback
, data
, &shdr_view
))
6649 shdr
= (const b_elf_shdr
*) shdr_view
.view
.data
;
6652 shnum
= shdr
->sh_size
;
6654 if (shstrndx
== SHN_XINDEX
)
6656 shstrndx
= shdr
->sh_link
;
6658 /* Versions of the GNU binutils between 2.12 and 2.18 did
6659 not handle objects with more than SHN_LORESERVE sections
6660 correctly. All large section indexes were offset by
6661 0x100. There is more information at
6662 http://sourceware.org/bugzilla/show_bug.cgi?id-5900 .
6663 Fortunately these object files are easy to detect, as the
6664 GNU binutils always put the section header string table
6665 near the end of the list of sections. Thus if the
6666 section header string table index is larger than the
6667 number of sections, then we know we have to subtract
6668 0x100 to get the real section index. */
6669 if (shstrndx
>= shnum
&& shstrndx
>= SHN_LORESERVE
+ 0x100)
6673 elf_release_view (state
, &shdr_view
, error_callback
, data
);
6676 if (shnum
== 0 || shstrndx
== 0)
6679 /* To translate PC to file/line when using DWARF, we need to find
6680 the .debug_info and .debug_line sections. */
6682 /* Read the section headers, skipping the first one. */
6684 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6685 shoff
+ sizeof (b_elf_shdr
),
6686 (shnum
- 1) * sizeof (b_elf_shdr
),
6687 error_callback
, data
, &shdrs_view
))
6689 shdrs_view_valid
= 1;
6690 shdrs
= (const b_elf_shdr
*) shdrs_view
.view
.data
;
6692 /* Read the section names. */
6694 shstrhdr
= &shdrs
[shstrndx
- 1];
6695 shstr_size
= shstrhdr
->sh_size
;
6696 shstr_off
= shstrhdr
->sh_offset
;
6698 if (!elf_get_view (state
, descriptor
, memory
, memory_size
, shstr_off
,
6699 shstrhdr
->sh_size
, error_callback
, data
, &names_view
))
6701 names_view_valid
= 1;
6702 names
= (const char *) names_view
.view
.data
;
6707 memset (sections
, 0, sizeof sections
);
6708 memset (zsections
, 0, sizeof zsections
);
6710 /* Look for the symbol table. */
6711 for (i
= 1; i
< shnum
; ++i
)
6713 const b_elf_shdr
*shdr
;
6714 unsigned int sh_name
;
6718 shdr
= &shdrs
[i
- 1];
6720 if (shdr
->sh_type
== SHT_SYMTAB
)
6722 else if (shdr
->sh_type
== SHT_DYNSYM
)
6725 sh_name
= shdr
->sh_name
;
6726 if (sh_name
>= shstr_size
)
6728 error_callback (data
, "ELF section name out of range", 0);
6732 name
= names
+ sh_name
;
6734 for (j
= 0; j
< (int) DEBUG_MAX
; ++j
)
6736 if (strcmp (name
, dwarf_section_names
[j
]) == 0)
6738 sections
[j
].offset
= shdr
->sh_offset
;
6739 sections
[j
].size
= shdr
->sh_size
;
6740 sections
[j
].compressed
= (shdr
->sh_flags
& SHF_COMPRESSED
) != 0;
6745 if (name
[0] == '.' && name
[1] == 'z')
6747 for (j
= 0; j
< (int) DEBUG_MAX
; ++j
)
6749 if (strcmp (name
+ 2, dwarf_section_names
[j
] + 1) == 0)
6751 zsections
[j
].offset
= shdr
->sh_offset
;
6752 zsections
[j
].size
= shdr
->sh_size
;
6758 /* Read the build ID if present. This could check for any
6759 SHT_NOTE section with the right note name and type, but gdb
6760 looks for a specific section name. */
6761 if ((!debuginfo
|| with_buildid_data
!= NULL
)
6762 && !buildid_view_valid
6763 && strcmp (name
, ".note.gnu.build-id") == 0)
6765 const b_elf_note
*note
;
6767 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6768 shdr
->sh_offset
, shdr
->sh_size
, error_callback
,
6769 data
, &buildid_view
))
6772 buildid_view_valid
= 1;
6773 note
= (const b_elf_note
*) buildid_view
.view
.data
;
6774 if (note
->type
== NT_GNU_BUILD_ID
6775 && note
->namesz
== 4
6776 && strncmp (note
->name
, "GNU", 4) == 0
6777 && shdr
->sh_size
<= 12 + ((note
->namesz
+ 3) & ~ 3) + note
->descsz
)
6779 buildid_data
= ¬e
->name
[0] + ((note
->namesz
+ 3) & ~ 3);
6780 buildid_size
= note
->descsz
;
6783 if (with_buildid_size
!= 0)
6785 if (buildid_size
!= with_buildid_size
)
6788 if (memcmp (buildid_data
, with_buildid_data
, buildid_size
) != 0)
6793 /* Read the debuglink file if present. */
6795 && !debuglink_view_valid
6796 && strcmp (name
, ".gnu_debuglink") == 0)
6798 const char *debuglink_data
;
6801 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6802 shdr
->sh_offset
, shdr
->sh_size
, error_callback
,
6803 data
, &debuglink_view
))
6806 debuglink_view_valid
= 1;
6807 debuglink_data
= (const char *) debuglink_view
.view
.data
;
6808 crc_offset
= strnlen (debuglink_data
, shdr
->sh_size
);
6809 crc_offset
= (crc_offset
+ 3) & ~3;
6810 if (crc_offset
+ 4 <= shdr
->sh_size
)
6812 debuglink_name
= debuglink_data
;
6813 debuglink_crc
= *(const uint32_t*)(debuglink_data
+ crc_offset
);
6817 if (!debugaltlink_view_valid
6818 && strcmp (name
, ".gnu_debugaltlink") == 0)
6820 const char *debugaltlink_data
;
6821 size_t debugaltlink_name_len
;
6823 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6824 shdr
->sh_offset
, shdr
->sh_size
, error_callback
,
6825 data
, &debugaltlink_view
))
6828 debugaltlink_view_valid
= 1;
6829 debugaltlink_data
= (const char *) debugaltlink_view
.view
.data
;
6830 debugaltlink_name
= debugaltlink_data
;
6831 debugaltlink_name_len
= strnlen (debugaltlink_data
, shdr
->sh_size
);
6832 if (debugaltlink_name_len
< shdr
->sh_size
)
6834 /* Include terminating zero. */
6835 debugaltlink_name_len
+= 1;
6837 debugaltlink_buildid_data
6838 = debugaltlink_data
+ debugaltlink_name_len
;
6839 debugaltlink_buildid_size
= shdr
->sh_size
- debugaltlink_name_len
;
6843 if (!gnu_debugdata_view_valid
6844 && strcmp (name
, ".gnu_debugdata") == 0)
6846 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6847 shdr
->sh_offset
, shdr
->sh_size
, error_callback
,
6848 data
, &gnu_debugdata_view
))
6851 gnu_debugdata_size
= shdr
->sh_size
;
6852 gnu_debugdata_view_valid
= 1;
6855 /* Read the .opd section on PowerPC64 ELFv1. */
6856 if (ehdr
.e_machine
== EM_PPC64
6857 && (ehdr
.e_flags
& EF_PPC64_ABI
) < 2
6858 && shdr
->sh_type
== SHT_PROGBITS
6859 && strcmp (name
, ".opd") == 0)
6861 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6862 shdr
->sh_offset
, shdr
->sh_size
, error_callback
,
6863 data
, &opd_data
.view
))
6867 opd
->addr
= shdr
->sh_addr
;
6868 opd
->data
= (const char *) opd_data
.view
.view
.data
;
6869 opd
->size
= shdr
->sh_size
;
6873 if (symtab_shndx
== 0)
6874 symtab_shndx
= dynsym_shndx
;
6875 if (symtab_shndx
!= 0 && !debuginfo
)
6877 const b_elf_shdr
*symtab_shdr
;
6878 unsigned int strtab_shndx
;
6879 const b_elf_shdr
*strtab_shdr
;
6880 struct elf_syminfo_data
*sdata
;
6882 symtab_shdr
= &shdrs
[symtab_shndx
- 1];
6883 strtab_shndx
= symtab_shdr
->sh_link
;
6884 if (strtab_shndx
>= shnum
)
6886 error_callback (data
,
6887 "ELF symbol table strtab link out of range", 0);
6890 strtab_shdr
= &shdrs
[strtab_shndx
- 1];
6892 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6893 symtab_shdr
->sh_offset
, symtab_shdr
->sh_size
,
6894 error_callback
, data
, &symtab_view
))
6896 symtab_view_valid
= 1;
6898 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6899 strtab_shdr
->sh_offset
, strtab_shdr
->sh_size
,
6900 error_callback
, data
, &strtab_view
))
6902 strtab_view_valid
= 1;
6904 sdata
= ((struct elf_syminfo_data
*)
6905 backtrace_alloc (state
, sizeof *sdata
, error_callback
, data
));
6909 if (!elf_initialize_syminfo (state
, base_address
,
6910 symtab_view
.view
.data
, symtab_shdr
->sh_size
,
6911 strtab_view
.view
.data
, strtab_shdr
->sh_size
,
6912 error_callback
, data
, sdata
, opd
))
6914 backtrace_free (state
, sdata
, sizeof *sdata
, error_callback
, data
);
6918 /* We no longer need the symbol table, but we hold on to the
6919 string table permanently. */
6920 elf_release_view (state
, &symtab_view
, error_callback
, data
);
6921 symtab_view_valid
= 0;
6922 strtab_view_valid
= 0;
6926 elf_add_syminfo_data (state
, sdata
);
6929 elf_release_view (state
, &shdrs_view
, error_callback
, data
);
6930 shdrs_view_valid
= 0;
6931 elf_release_view (state
, &names_view
, error_callback
, data
);
6932 names_view_valid
= 0;
6934 /* If the debug info is in a separate file, read that one instead. */
6936 if (buildid_data
!= NULL
)
6940 d
= elf_open_debugfile_by_buildid (state
, buildid_data
, buildid_size
,
6941 error_callback
, data
);
6946 elf_release_view (state
, &buildid_view
, error_callback
, data
);
6947 if (debuglink_view_valid
)
6948 elf_release_view (state
, &debuglink_view
, error_callback
, data
);
6949 if (debugaltlink_view_valid
)
6950 elf_release_view (state
, &debugaltlink_view
, error_callback
, data
);
6951 ret
= elf_add (state
, "", d
, NULL
, 0, base_address
, error_callback
,
6952 data
, fileline_fn
, found_sym
, found_dwarf
, NULL
, 0,
6955 backtrace_close (d
, error_callback
, data
);
6956 else if (descriptor
>= 0)
6957 backtrace_close (descriptor
, error_callback
, data
);
6962 if (buildid_view_valid
)
6964 elf_release_view (state
, &buildid_view
, error_callback
, data
);
6965 buildid_view_valid
= 0;
6970 elf_release_view (state
, &opd
->view
, error_callback
, data
);
6974 if (debuglink_name
!= NULL
)
6978 d
= elf_open_debugfile_by_debuglink (state
, filename
, debuglink_name
,
6979 debuglink_crc
, error_callback
,
6985 elf_release_view (state
, &debuglink_view
, error_callback
, data
);
6986 if (debugaltlink_view_valid
)
6987 elf_release_view (state
, &debugaltlink_view
, error_callback
, data
);
6988 ret
= elf_add (state
, "", d
, NULL
, 0, base_address
, error_callback
,
6989 data
, fileline_fn
, found_sym
, found_dwarf
, NULL
, 0,
6992 backtrace_close (d
, error_callback
, data
);
6993 else if (descriptor
>= 0)
6994 backtrace_close(descriptor
, error_callback
, data
);
6999 if (debuglink_view_valid
)
7001 elf_release_view (state
, &debuglink_view
, error_callback
, data
);
7002 debuglink_view_valid
= 0;
7005 struct dwarf_data
*fileline_altlink
= NULL
;
7006 if (debugaltlink_name
!= NULL
)
7010 d
= elf_open_debugfile_by_debuglink (state
, filename
, debugaltlink_name
,
7011 0, error_callback
, data
);
7016 ret
= elf_add (state
, filename
, d
, NULL
, 0, base_address
,
7017 error_callback
, data
, fileline_fn
, found_sym
,
7018 found_dwarf
, &fileline_altlink
, 0, 1,
7019 debugaltlink_buildid_data
, debugaltlink_buildid_size
);
7020 elf_release_view (state
, &debugaltlink_view
, error_callback
, data
);
7021 debugaltlink_view_valid
= 0;
7024 backtrace_close (d
, error_callback
, data
);
7030 if (debugaltlink_view_valid
)
7032 elf_release_view (state
, &debugaltlink_view
, error_callback
, data
);
7033 debugaltlink_view_valid
= 0;
7036 if (gnu_debugdata_view_valid
)
7040 ret
= elf_uncompress_lzma (state
,
7041 ((const unsigned char *)
7042 gnu_debugdata_view
.view
.data
),
7043 gnu_debugdata_size
, error_callback
, data
,
7044 &gnu_debugdata_uncompressed
,
7045 &gnu_debugdata_uncompressed_size
);
7047 elf_release_view (state
, &gnu_debugdata_view
, error_callback
, data
);
7048 gnu_debugdata_view_valid
= 0;
7052 ret
= elf_add (state
, filename
, -1, gnu_debugdata_uncompressed
,
7053 gnu_debugdata_uncompressed_size
, base_address
,
7054 error_callback
, data
, fileline_fn
, found_sym
,
7055 found_dwarf
, NULL
, 0, 0, NULL
, 0);
7056 if (ret
>= 0 && descriptor
>= 0)
7057 backtrace_close(descriptor
, error_callback
, data
);
7062 /* Read all the debug sections in a single view, since they are
7063 probably adjacent in the file. If any of sections are
7064 uncompressed, we never release this view. */
7069 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7073 if (sections
[i
].size
!= 0)
7075 if (min_offset
== 0 || sections
[i
].offset
< min_offset
)
7076 min_offset
= sections
[i
].offset
;
7077 end
= sections
[i
].offset
+ sections
[i
].size
;
7078 if (end
> max_offset
)
7080 debug_size
+= sections
[i
].size
;
7082 if (zsections
[i
].size
!= 0)
7084 if (min_offset
== 0 || zsections
[i
].offset
< min_offset
)
7085 min_offset
= zsections
[i
].offset
;
7086 end
= zsections
[i
].offset
+ zsections
[i
].size
;
7087 if (end
> max_offset
)
7089 debug_size
+= zsections
[i
].size
;
7092 if (min_offset
== 0 || max_offset
== 0)
7094 if (descriptor
>= 0)
7096 if (!backtrace_close (descriptor
, error_callback
, data
))
7102 /* If the total debug section size is large, assume that there are
7103 gaps between the sections, and read them individually. */
7105 if (max_offset
- min_offset
< 0x20000000
7106 || max_offset
- min_offset
< debug_size
+ 0x10000)
7108 if (!elf_get_view (state
, descriptor
, memory
, memory_size
, min_offset
,
7109 max_offset
- min_offset
, error_callback
, data
,
7112 debug_view_valid
= 1;
7116 memset (&split_debug_view
[0], 0, sizeof split_debug_view
);
7117 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7119 struct debug_section_info
*dsec
;
7121 if (sections
[i
].size
!= 0)
7122 dsec
= §ions
[i
];
7123 else if (zsections
[i
].size
!= 0)
7124 dsec
= &zsections
[i
];
7128 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
7129 dsec
->offset
, dsec
->size
, error_callback
, data
,
7130 &split_debug_view
[i
]))
7132 split_debug_view_valid
[i
] = 1;
7134 if (sections
[i
].size
!= 0)
7135 sections
[i
].data
= ((const unsigned char *)
7136 split_debug_view
[i
].view
.data
);
7138 zsections
[i
].data
= ((const unsigned char *)
7139 split_debug_view
[i
].view
.data
);
7143 /* We've read all we need from the executable. */
7144 if (descriptor
>= 0)
7146 if (!backtrace_close (descriptor
, error_callback
, data
))
7151 using_debug_view
= 0;
7152 if (debug_view_valid
)
7154 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7156 if (sections
[i
].size
== 0)
7157 sections
[i
].data
= NULL
;
7160 sections
[i
].data
= ((const unsigned char *) debug_view
.view
.data
7161 + (sections
[i
].offset
- min_offset
));
7165 if (zsections
[i
].size
== 0)
7166 zsections
[i
].data
= NULL
;
7168 zsections
[i
].data
= ((const unsigned char *) debug_view
.view
.data
7169 + (zsections
[i
].offset
- min_offset
));
7173 /* Uncompress the old format (--compress-debug-sections=zlib-gnu). */
7175 zdebug_table
= NULL
;
7176 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7178 if (sections
[i
].size
== 0 && zsections
[i
].size
> 0)
7180 unsigned char *uncompressed_data
;
7181 size_t uncompressed_size
;
7183 if (zdebug_table
== NULL
)
7185 zdebug_table
= ((uint16_t *)
7186 backtrace_alloc (state
, ZLIB_TABLE_SIZE
,
7187 error_callback
, data
));
7188 if (zdebug_table
== NULL
)
7192 uncompressed_data
= NULL
;
7193 uncompressed_size
= 0;
7194 if (!elf_uncompress_zdebug (state
, zsections
[i
].data
,
7195 zsections
[i
].size
, zdebug_table
,
7196 error_callback
, data
,
7197 &uncompressed_data
, &uncompressed_size
))
7199 sections
[i
].data
= uncompressed_data
;
7200 sections
[i
].size
= uncompressed_size
;
7201 sections
[i
].compressed
= 0;
7203 if (split_debug_view_valid
[i
])
7205 elf_release_view (state
, &split_debug_view
[i
],
7206 error_callback
, data
);
7207 split_debug_view_valid
[i
] = 0;
7212 if (zdebug_table
!= NULL
)
7214 backtrace_free (state
, zdebug_table
, ZLIB_TABLE_SIZE
,
7215 error_callback
, data
);
7216 zdebug_table
= NULL
;
7219 /* Uncompress the official ELF format
7220 (--compress-debug-sections=zlib-gabi, --compress-debug-sections=zstd). */
7221 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7223 unsigned char *uncompressed_data
;
7224 size_t uncompressed_size
;
7226 if (sections
[i
].size
== 0 || !sections
[i
].compressed
)
7229 if (zdebug_table
== NULL
)
7231 zdebug_table
= ((uint16_t *)
7232 backtrace_alloc (state
, ZDEBUG_TABLE_SIZE
,
7233 error_callback
, data
));
7234 if (zdebug_table
== NULL
)
7238 uncompressed_data
= NULL
;
7239 uncompressed_size
= 0;
7240 if (!elf_uncompress_chdr (state
, sections
[i
].data
, sections
[i
].size
,
7241 zdebug_table
, error_callback
, data
,
7242 &uncompressed_data
, &uncompressed_size
))
7244 sections
[i
].data
= uncompressed_data
;
7245 sections
[i
].size
= uncompressed_size
;
7246 sections
[i
].compressed
= 0;
7248 if (debug_view_valid
)
7250 else if (split_debug_view_valid
[i
])
7252 elf_release_view (state
, &split_debug_view
[i
], error_callback
, data
);
7253 split_debug_view_valid
[i
] = 0;
7257 if (zdebug_table
!= NULL
)
7258 backtrace_free (state
, zdebug_table
, ZDEBUG_TABLE_SIZE
,
7259 error_callback
, data
);
7261 if (debug_view_valid
&& using_debug_view
== 0)
7263 elf_release_view (state
, &debug_view
, error_callback
, data
);
7264 debug_view_valid
= 0;
7267 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7269 dwarf_sections
.data
[i
] = sections
[i
].data
;
7270 dwarf_sections
.size
[i
] = sections
[i
].size
;
7273 if (!backtrace_dwarf_add (state
, base_address
, &dwarf_sections
,
7274 ehdr
.e_ident
[EI_DATA
] == ELFDATA2MSB
,
7276 error_callback
, data
, fileline_fn
,
7285 if (shdrs_view_valid
)
7286 elf_release_view (state
, &shdrs_view
, error_callback
, data
);
7287 if (names_view_valid
)
7288 elf_release_view (state
, &names_view
, error_callback
, data
);
7289 if (symtab_view_valid
)
7290 elf_release_view (state
, &symtab_view
, error_callback
, data
);
7291 if (strtab_view_valid
)
7292 elf_release_view (state
, &strtab_view
, error_callback
, data
);
7293 if (debuglink_view_valid
)
7294 elf_release_view (state
, &debuglink_view
, error_callback
, data
);
7295 if (debugaltlink_view_valid
)
7296 elf_release_view (state
, &debugaltlink_view
, error_callback
, data
);
7297 if (gnu_debugdata_view_valid
)
7298 elf_release_view (state
, &gnu_debugdata_view
, error_callback
, data
);
7299 if (buildid_view_valid
)
7300 elf_release_view (state
, &buildid_view
, error_callback
, data
);
7301 if (debug_view_valid
)
7302 elf_release_view (state
, &debug_view
, error_callback
, data
);
7303 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7305 if (split_debug_view_valid
[i
])
7306 elf_release_view (state
, &split_debug_view
[i
], error_callback
, data
);
7309 elf_release_view (state
, &opd
->view
, error_callback
, data
);
7310 if (descriptor
>= 0)
7311 backtrace_close (descriptor
, error_callback
, data
);
7315 /* Data passed to phdr_callback. */
7319 struct backtrace_state
*state
;
7320 backtrace_error_callback error_callback
;
7322 fileline
*fileline_fn
;
7325 const char *exe_filename
;
7329 /* Callback passed to dl_iterate_phdr. Load debug info from shared
7334 __attribute__ ((__force_align_arg_pointer__
))
7336 phdr_callback (struct dl_phdr_info
*info
, size_t size ATTRIBUTE_UNUSED
,
7339 struct phdr_data
*pd
= (struct phdr_data
*) pdata
;
7340 const char *filename
;
7343 fileline elf_fileline_fn
;
7346 /* There is not much we can do if we don't have the module name,
7347 unless executable is ET_DYN, where we expect the very first
7348 phdr_callback to be for the PIE. */
7349 if (info
->dlpi_name
== NULL
|| info
->dlpi_name
[0] == '\0')
7351 if (pd
->exe_descriptor
== -1)
7353 filename
= pd
->exe_filename
;
7354 descriptor
= pd
->exe_descriptor
;
7355 pd
->exe_descriptor
= -1;
7359 if (pd
->exe_descriptor
!= -1)
7361 backtrace_close (pd
->exe_descriptor
, pd
->error_callback
, pd
->data
);
7362 pd
->exe_descriptor
= -1;
7365 filename
= info
->dlpi_name
;
7366 descriptor
= backtrace_open (info
->dlpi_name
, pd
->error_callback
,
7367 pd
->data
, &does_not_exist
);
7372 if (elf_add (pd
->state
, filename
, descriptor
, NULL
, 0, info
->dlpi_addr
,
7373 pd
->error_callback
, pd
->data
, &elf_fileline_fn
, pd
->found_sym
,
7374 &found_dwarf
, NULL
, 0, 0, NULL
, 0))
7378 *pd
->found_dwarf
= 1;
7379 *pd
->fileline_fn
= elf_fileline_fn
;
7386 /* Initialize the backtrace data we need from an ELF executable. At
7387 the ELF level, all we need to do is find the debug info
7391 backtrace_initialize (struct backtrace_state
*state
, const char *filename
,
7392 int descriptor
, backtrace_error_callback error_callback
,
7393 void *data
, fileline
*fileline_fn
)
7398 fileline elf_fileline_fn
= elf_nodebug
;
7399 struct phdr_data pd
;
7401 ret
= elf_add (state
, filename
, descriptor
, NULL
, 0, 0, error_callback
, data
,
7402 &elf_fileline_fn
, &found_sym
, &found_dwarf
, NULL
, 1, 0, NULL
,
7408 pd
.error_callback
= error_callback
;
7410 pd
.fileline_fn
= &elf_fileline_fn
;
7411 pd
.found_sym
= &found_sym
;
7412 pd
.found_dwarf
= &found_dwarf
;
7413 pd
.exe_filename
= filename
;
7414 pd
.exe_descriptor
= ret
< 0 ? descriptor
: -1;
7416 dl_iterate_phdr (phdr_callback
, (void *) &pd
);
7418 if (!state
->threaded
)
7421 state
->syminfo_fn
= elf_syminfo
;
7422 else if (state
->syminfo_fn
== NULL
)
7423 state
->syminfo_fn
= elf_nosyms
;
7428 backtrace_atomic_store_pointer (&state
->syminfo_fn
, elf_syminfo
);
7430 (void) __sync_bool_compare_and_swap (&state
->syminfo_fn
, NULL
,
7434 if (!state
->threaded
)
7435 *fileline_fn
= state
->fileline_fn
;
7437 *fileline_fn
= backtrace_atomic_load_pointer (&state
->fileline_fn
);
7439 if (*fileline_fn
== NULL
|| *fileline_fn
== elf_nodebug
)
7440 *fileline_fn
= elf_fileline_fn
;