Bug 439685 compiler warning in callgrind/main.c
[valgrind.git] / coregrind / m_debuginfo / storage.c
blobc3fa62e96c04d67aaa9ca39c0bde1c8f516133fe
2 /*--------------------------------------------------------------------*/
3 /*--- Format-neutral storage of and querying of info acquired from ---*/
4 /*--- ELF/XCOFF stabs/dwarf1/dwarf2/dwarf3 debug info. ---*/
5 /*--- storage.c ---*/
6 /*--------------------------------------------------------------------*/
8 /*
9 This file is part of Valgrind, a dynamic binary instrumentation
10 framework.
12 Copyright (C) 2000-2017 Julian Seward
13 jseward@acm.org
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, see <http://www.gnu.org/licenses/>.
28 The GNU General Public License is contained in the file COPYING.
31 /* This file manages the data structures built by the debuginfo
32 system. These are: the top level SegInfo list. For each SegInfo,
33 there are tables for address-to-symbol mappings,
34 address-to-src-file/line mappings, and address-to-CFI-info
35 mappings.
38 #include "pub_core_basics.h"
39 #include "pub_core_options.h" /* VG_(clo_verbosity) */
40 #include "pub_core_debuginfo.h"
41 #include "pub_core_debuglog.h"
42 #include "pub_core_libcassert.h"
43 #include "pub_core_libcbase.h"
44 #include "pub_core_libcprint.h"
45 #include "pub_core_xarray.h"
46 #include "pub_core_oset.h"
47 #include "pub_core_deduppoolalloc.h"
49 #include "priv_misc.h" /* dinfo_zalloc/free/strdup */
50 #include "priv_image.h"
51 #include "priv_d3basics.h" /* ML_(pp_GX) */
52 #include "priv_tytypes.h"
53 #include "priv_storage.h" /* self */
56 /*------------------------------------------------------------*/
57 /*--- Misc (printing, errors) ---*/
58 /*------------------------------------------------------------*/
60 /* Show a non-fatal debug info reading error. Use VG_(core_panic) for
61 fatal errors. 'serious' errors are shown regardless of the
62 verbosity setting. */
63 void ML_(symerr) ( const DebugInfo* di, Bool serious, const HChar* msg )
65 /* XML mode hides everything :-( */
66 if (VG_(clo_xml))
67 return;
69 if (serious) {
71 VG_(message)(Vg_DebugMsg, "WARNING: Serious error when "
72 "reading debug info\n");
73 if (True || VG_(clo_verbosity) < 2) {
74 /* Need to show what the file name is, at verbosity levels 2
75 or below, since that won't already have been shown */
76 VG_(message)(Vg_DebugMsg,
77 "When reading debug info from %s:\n",
78 (di && di->fsm.filename) ? di->fsm.filename
79 : "???");
81 VG_(message)(Vg_DebugMsg, "%s\n", msg);
83 } else { /* !serious */
85 if (VG_(clo_verbosity) >= 2)
86 VG_(message)(Vg_DebugMsg, "%s\n", msg);
92 /* Print a symbol. */
93 void ML_(ppSym) ( Int idx, const DiSym* sym )
95 const HChar** sec_names = sym->sec_names;
96 vg_assert(sym->pri_name);
97 if (sec_names)
98 vg_assert(sec_names);
99 VG_(printf)( "%5d: %c%c%c %#8lx .. %#8lx (%u) %s%s",
100 idx,
101 sym->isText ? 'T' : '-',
102 sym->isIFunc ? 'I' : '-',
103 sym->isGlobal ? 'G' : '-',
104 sym->avmas.main,
105 sym->avmas.main + sym->size - 1, sym->size,
106 sym->pri_name, sec_names ? " " : "" );
107 if (sec_names) {
108 while (*sec_names) {
109 VG_(printf)("%s%s", *sec_names, *(sec_names+1) ? " " : "");
110 sec_names++;
113 VG_(printf)("\n");
/* Print a call-frame-info summary.
   'exprs' is the expression pool that CFIR_EXPR rules index into;
   [base, base+len-1] is the address range the summary covers;
   'si_m' holds the CFA rule and per-register recovery rules. */
void ML_(ppDiCfSI) ( const XArray* /* of CfiExpr */ exprs,
                     Addr base, UInt len,
                     const DiCfSI_m* si_m )
{
   /* Render one register-recovery rule.  _off is an offset for the
      CFAREL/MEMCFAREL cases and an index into 'exprs' for CFIR_EXPR;
      it is ignored for the other cases. */
#  define SHOW_HOW(_how, _off)                   \
      do {                                       \
         if (_how == CFIR_UNKNOWN) {             \
            VG_(printf)("Unknown");              \
         } else                                  \
         if (_how == CFIR_SAME) {                \
            VG_(printf)("Same");                 \
         } else                                  \
         if (_how == CFIR_CFAREL) {              \
            VG_(printf)("cfa+%d", _off);         \
         } else                                  \
         if (_how == CFIR_MEMCFAREL) {           \
            VG_(printf)("*(cfa+%d)", _off);      \
         } else                                  \
         if (_how == CFIR_EXPR) {                \
            VG_(printf)("{");                    \
            ML_(ppCfiExpr)(exprs, _off);         \
            VG_(printf)("}");                    \
         } else                                  \
         if (_how == CFIR_S390X_F0) {            \
            VG_(printf)("oldF0");                \
         } else                                  \
         if (_how == CFIR_S390X_F1) {            \
            VG_(printf)("oldF1");                \
         } else                                  \
         if (_how == CFIR_S390X_F2) {            \
            VG_(printf)("oldF2");                \
         } else                                  \
         if (_how == CFIR_S390X_F3) {            \
            VG_(printf)("oldF3");                \
         } else                                  \
         if (_how == CFIR_S390X_F4) {            \
            VG_(printf)("oldF4");                \
         } else                                  \
         if (_how == CFIR_S390X_F5) {            \
            VG_(printf)("oldF5");                \
         } else                                  \
         if (_how == CFIR_S390X_F6) {            \
            VG_(printf)("oldF6");                \
         } else                                  \
         if (_how == CFIR_S390X_F7) {            \
            VG_(printf)("oldF7");                \
         } else {                                \
            vg_assert(0+0);                      \
         }                                       \
      } while (0)

   /* Header: the covered address range (empty brackets for a
      zero/zero query). */
   if (base != 0 || len != 0)
      VG_(printf)("[%#lx .. %#lx]: ", base, base + len - 1);
   else
      VG_(printf)("[]: ");

   /* First the CFA (canonical frame address) computation rule. */
   switch (si_m->cfa_how) {
      case CFIC_IA_SPREL:
         VG_(printf)("let cfa=oldSP+%d", si_m->cfa_off);
         break;
      case CFIC_IA_BPREL:
         VG_(printf)("let cfa=oldBP+%d", si_m->cfa_off);
         break;
      case CFIC_ARM_R13REL:
         VG_(printf)("let cfa=oldR13+%d", si_m->cfa_off);
         break;
      case CFIC_ARM_R12REL:
         VG_(printf)("let cfa=oldR12+%d", si_m->cfa_off);
         break;
      case CFIC_ARM_R11REL:
         VG_(printf)("let cfa=oldR11+%d", si_m->cfa_off);
         break;
      /* NOTE(review): CFIR_SAME is a register-rule constant (see
         SHOW_HOW above) appearing here in a switch over cfa_how
         constants (CFIC_*) -- confirm this case is intentional. */
      case CFIR_SAME:
         VG_(printf)("let cfa=Same");
         break;
      case CFIC_ARM_R7REL:
         VG_(printf)("let cfa=oldR7+%d", si_m->cfa_off);
         break;
      case CFIC_ARM64_SPREL:
         VG_(printf)("let cfa=oldSP+%d", si_m->cfa_off);
         break;
      case CFIC_ARM64_X29REL:
         VG_(printf)("let cfa=oldX29+%d", si_m->cfa_off);
         break;
      case CFIC_EXPR:
         VG_(printf)("let cfa={");
         ML_(ppCfiExpr)(exprs, si_m->cfa_off);
         VG_(printf)("}");
         break;
      default:
         vg_assert(0);
   }

   /* Then the per-register recovery rules; the register set is
      architecture-specific. */
   VG_(printf)(" in RA=");
   SHOW_HOW(si_m->ra_how, si_m->ra_off);
#  if defined(VGA_x86) || defined(VGA_amd64)
   VG_(printf)(" SP=");
   SHOW_HOW(si_m->sp_how, si_m->sp_off);
   VG_(printf)(" BP=");
   SHOW_HOW(si_m->bp_how, si_m->bp_off);
#  elif defined(VGA_arm)
   VG_(printf)(" R14=");
   SHOW_HOW(si_m->r14_how, si_m->r14_off);
   VG_(printf)(" R13=");
   SHOW_HOW(si_m->r13_how, si_m->r13_off);
   VG_(printf)(" R12=");
   SHOW_HOW(si_m->r12_how, si_m->r12_off);
   VG_(printf)(" R11=");
   SHOW_HOW(si_m->r11_how, si_m->r11_off);
   VG_(printf)(" R7=");
   SHOW_HOW(si_m->r7_how, si_m->r7_off);
#  elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
   /* nothing */
#  elif defined(VGA_s390x)
   VG_(printf)(" SP=");
   SHOW_HOW(si_m->sp_how, si_m->sp_off);
   VG_(printf)(" FP=");
   SHOW_HOW(si_m->fp_how, si_m->fp_off);
   VG_(printf)(" F0=");
   SHOW_HOW(si_m->f0_how, si_m->f0_off);
   VG_(printf)(" F1=");
   SHOW_HOW(si_m->f1_how, si_m->f1_off);
   VG_(printf)(" F2=");
   SHOW_HOW(si_m->f2_how, si_m->f2_off);
   VG_(printf)(" F3=");
   SHOW_HOW(si_m->f3_how, si_m->f3_off);
   VG_(printf)(" F4=");
   SHOW_HOW(si_m->f4_how, si_m->f4_off);
   VG_(printf)(" F5=");
   SHOW_HOW(si_m->f5_how, si_m->f5_off);
   VG_(printf)(" F6=");
   SHOW_HOW(si_m->f6_how, si_m->f6_off);
   VG_(printf)(" F7=");
   SHOW_HOW(si_m->f7_how, si_m->f7_off);
#  elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
   VG_(printf)(" SP=");
   SHOW_HOW(si_m->sp_how, si_m->sp_off);
   VG_(printf)(" FP=");
   SHOW_HOW(si_m->fp_how, si_m->fp_off);
#  elif defined(VGA_arm64)
   VG_(printf)(" SP=");
   SHOW_HOW(si_m->sp_how, si_m->sp_off);
   VG_(printf)(" X30=");
   SHOW_HOW(si_m->x30_how, si_m->x30_off);
   VG_(printf)(" X29=");
   SHOW_HOW(si_m->x29_how, si_m->x29_off);
#  else
#    error "Unknown arch"
#  endif
   VG_(printf)("\n");
#  undef SHOW_HOW
}
271 /*------------------------------------------------------------*/
272 /*--- Adding stuff ---*/
273 /*------------------------------------------------------------*/
275 /* If not yet in strpool, add a str to the string pool including terminating
276 zero.
277 Return the pointer to the string in strpool.
279 const HChar* ML_(addStr) ( DebugInfo* di, const HChar* str, Int len )
281 if (len == -1) {
282 len = VG_(strlen)(str);
283 } else {
284 vg_assert(len >= 0);
286 if (UNLIKELY(di->strpool == NULL))
287 di->strpool = VG_(newDedupPA)(SEGINFO_STRPOOLSIZE,
289 ML_(dinfo_zalloc),
290 "di.storage.addStr.1",
291 ML_(dinfo_free));
292 return VG_(allocEltDedupPA) (di->strpool, len+1, str);
295 UInt ML_(addFnDn) (struct _DebugInfo* di,
296 const HChar* filename,
297 const HChar* dirname)
299 FnDn fndn;
300 UInt fndn_ix;
302 if (UNLIKELY(di->fndnpool == NULL))
303 di->fndnpool = VG_(newDedupPA)(500,
304 vg_alignof(FnDn),
305 ML_(dinfo_zalloc),
306 "di.storage.addFnDn.1",
307 ML_(dinfo_free));
308 fndn.filename = ML_(addStr)(di, filename, -1);
309 fndn.dirname = dirname ? ML_(addStr)(di, dirname, -1) : NULL;
310 fndn_ix = VG_(allocFixedEltDedupPA) (di->fndnpool, sizeof(FnDn), &fndn);
311 return fndn_ix;
314 const HChar* ML_(fndn_ix2filename) (const DebugInfo* di,
315 UInt fndn_ix)
317 FnDn *fndn;
318 if (fndn_ix == 0)
319 return "???";
320 else {
321 fndn = VG_(indexEltNumber) (di->fndnpool, fndn_ix);
322 return fndn->filename;
326 const HChar* ML_(fndn_ix2dirname) (const DebugInfo* di,
327 UInt fndn_ix)
329 FnDn *fndn;
330 if (fndn_ix == 0)
331 return "";
332 else {
333 fndn = VG_(indexEltNumber) (di->fndnpool, fndn_ix);
334 if (fndn->dirname)
335 return fndn->dirname;
336 else
337 return "";
341 /* Add a string to the string table of a DebugInfo, by copying the
342 string from the given DiCursor. Measures the length of the string
343 itself. */
344 const HChar* ML_(addStrFromCursor)( DebugInfo* di, DiCursor c )
346 /* This is a less-than-stellar implementation, but it should
347 work. */
348 vg_assert(ML_(cur_is_valid)(c));
349 HChar* str = ML_(cur_read_strdup)(c, "di.addStrFromCursor.1");
350 const HChar* res = ML_(addStr)(di, str, -1);
351 ML_(dinfo_free)(str);
352 return res;
356 /* Add a symbol to the symbol table, by copying *sym. 'sym' may only
357 have one name, so there's no complexities to do with deep vs
358 shallow copying of the sec_name array. This is checked.
360 void ML_(addSym) ( struct _DebugInfo* di, DiSym* sym )
362 UInt new_sz, i;
363 DiSym* new_tab;
365 vg_assert(sym->pri_name != NULL);
366 vg_assert(sym->sec_names == NULL);
368 /* Ignore zero-sized syms. */
369 if (sym->size == 0) return;
371 if (di->symtab_used == di->symtab_size) {
372 new_sz = 2 * di->symtab_size;
373 if (new_sz == 0) new_sz = 500;
374 new_tab = ML_(dinfo_zalloc)( "di.storage.addSym.1",
375 new_sz * sizeof(DiSym) );
376 if (di->symtab != NULL) {
377 for (i = 0; i < di->symtab_used; i++)
378 new_tab[i] = di->symtab[i];
379 ML_(dinfo_free)(di->symtab);
381 di->symtab = new_tab;
382 di->symtab_size = new_sz;
385 di->symtab[di->symtab_used++] = *sym;
386 vg_assert(di->symtab_used <= di->symtab_size);
389 UInt ML_(fndn_ix) (const DebugInfo* di, Word locno)
391 UInt fndn_ix;
393 switch(di->sizeof_fndn_ix) {
394 case 1: fndn_ix = ((UChar*) di->loctab_fndn_ix)[locno]; break;
395 case 2: fndn_ix = ((UShort*) di->loctab_fndn_ix)[locno]; break;
396 case 4: fndn_ix = ((UInt*) di->loctab_fndn_ix)[locno]; break;
397 default: vg_assert(0);
399 return fndn_ix;
/* Store 'fndn_ix' in the packed loctab_fndn_ix array at slot 'locno',
   widening the per-entry size (1 -> 2 -> 4 bytes) on demand when the
   value does not fit in the current width.  Widening reallocates and
   copies the whole array, then falls through to the wider case. */
static inline void set_fndn_ix (struct _DebugInfo* di, Word locno, UInt fndn_ix)
{
   Word i;

   switch (di->sizeof_fndn_ix) {
      case 1:
         if (LIKELY (fndn_ix <= 255)) {
            ((UChar*) di->loctab_fndn_ix)[locno] = fndn_ix;
            return;
         }
         {
            /* Value needs more than 8 bits: widen to 2 bytes/entry. */
            UChar* old = (UChar*) di->loctab_fndn_ix;
            UShort* new = ML_(dinfo_zalloc)( "di.storage.sfix.1",
                                             di->loctab_size * 2 );
            for (i = 0; i < di->loctab_used; i++)
               new[i] = old[i];
            ML_(dinfo_free)(old);
            di->sizeof_fndn_ix = 2;
            di->loctab_fndn_ix = new;
         }
         // Fallthrough

      case 2:
         if (LIKELY (fndn_ix <= 65535)) {
            ((UShort*) di->loctab_fndn_ix)[locno] = fndn_ix;
            return;
         }
         {
            /* Value needs more than 16 bits: widen to 4 bytes/entry. */
            UShort* old = (UShort*) di->loctab_fndn_ix;
            UInt* new = ML_(dinfo_zalloc)( "di.storage.sfix.2",
                                           di->loctab_size * 4 );
            for (i = 0; i < di->loctab_used; i++)
               new[i] = old[i];
            ML_(dinfo_free)(old);
            di->sizeof_fndn_ix = 4;
            di->loctab_fndn_ix = new;
         }
         // Fallthrough

      case 4:
         ((UInt*) di->loctab_fndn_ix)[locno] = fndn_ix;
         return;

      default: vg_assert(0);
   }
}
450 // Comment the below line to trace LOCTAB merging/canonicalising
451 #define TRACE_LOCTAB_CANON(msg,prev_loc,cur_loc)
452 #ifndef TRACE_LOCTAB_CANON
453 #define TRACE_LOCTAB_CANON(msg,prev_loc,cur_loc) \
454 VG_(printf)("%s previous: addr %#lx, size %d, line %d, " \
455 " current: addr %#lx, size %d, line %d.\n", \
456 msg, \
457 (prev_loc)->addr, (prev_loc)->size, (prev_loc)->lineno, \
458 (cur_loc)->addr, (cur_loc)->size, (cur_loc)->lineno);
459 #endif
/* Add a location to the location table.  'fndn_ix' is stored in the
   parallel packed array via set_fndn_ix. */
static void addLoc ( struct _DebugInfo* di, DiLoc* loc, UInt fndn_ix )
{
   /* Zero-sized locs should have been ignored earlier */
   vg_assert(loc->size > 0);

   /* Check if the last entry has adjacent range for the same line;
      if so, merge the two entries to keep the table compact (unless
      the merged size would exceed MAX_LOC_SIZE). */
   if (di->loctab_used > 0) {
      DiLoc *previous = &di->loctab[di->loctab_used - 1];
      if ((previous->lineno == loc->lineno)
          && (previous->addr + previous->size == loc->addr)) {
         if (previous->size + loc->size <= MAX_LOC_SIZE) {
            TRACE_LOCTAB_CANON ("addLoc merging", previous, loc);
            previous->size += loc->size;
            return;
         } else {
            TRACE_LOCTAB_CANON ("addLoc merging not done (maxsize)",
                                previous, loc);
         }
      }
   }

   /* Table full: grow both the loctab and its parallel fndn_ix array,
      doubling capacity (starting at 500 entries). */
   if (di->loctab_used == di->loctab_size) {
      UInt   new_sz;
      DiLoc* new_loctab;
      void*  new_loctab_fndn_ix;

      new_sz = 2 * di->loctab_size;
      if (new_sz == 0) new_sz = 500;
      new_loctab = ML_(dinfo_zalloc)( "di.storage.addLoc.1",
                                      new_sz * sizeof(DiLoc) );
      if (di->sizeof_fndn_ix == 0)
         di->sizeof_fndn_ix = 1; // To start with.
      new_loctab_fndn_ix = ML_(dinfo_zalloc)( "di.storage.addLoc.2",
                                              new_sz * di->sizeof_fndn_ix );
      if (di->loctab != NULL) {
         VG_(memcpy)(new_loctab, di->loctab,
                     di->loctab_used * sizeof(DiLoc));
         VG_(memcpy)(new_loctab_fndn_ix, di->loctab_fndn_ix,
                     di->loctab_used * di->sizeof_fndn_ix);
         ML_(dinfo_free)(di->loctab);
         ML_(dinfo_free)(di->loctab_fndn_ix);
      }
      di->loctab = new_loctab;
      di->loctab_fndn_ix = new_loctab_fndn_ix;
      di->loctab_size = new_sz;
   }

   di->loctab[di->loctab_used] = *loc;
   set_fndn_ix (di, di->loctab_used, fndn_ix);
   di->loctab_used++;
   vg_assert(di->loctab_used <= di->loctab_size);
}
517 /* Resize the LocTab (line number table) to save memory, by removing
518 (and, potentially, allowing m_mallocfree to unmap) any unused space
519 at the end of the table. */
520 static void shrinkLocTab ( struct _DebugInfo* di )
522 UWord new_sz = di->loctab_used;
523 if (new_sz == di->loctab_size) return;
524 vg_assert(new_sz < di->loctab_size);
525 ML_(dinfo_shrink_block)( di->loctab, new_sz * sizeof(DiLoc));
526 ML_(dinfo_shrink_block)( di->loctab_fndn_ix, new_sz * di->sizeof_fndn_ix);
527 di->loctab_size = new_sz;
530 // Complain once, unless VG_(debugLog_getLevel)() > 3
531 // showinfo is called if VG_(debugLog_getLevel)() >= 1
532 #define COMPLAIN_ONCE(what, limit, limit_op, showinfo) \
534 static Bool complained = False; \
535 if (!complained) { \
536 complained = VG_(debugLog_getLevel)() <= 3; \
537 VG_(message)(Vg_UserMsg, \
538 "warning: Can't handle " what " with " \
539 "line number %d " limit_op " than %d\n", \
540 lineno, limit); \
541 if (VG_(debugLog_getLevel)() >= 1) showinfo; \
542 if (complained) \
543 VG_(message)(Vg_UserMsg, \
544 "(Nb: this message is only shown once)\n"); \
/* Top-level place to call to add a source-location mapping entry.
   Maps [this, next) to 'lineno' in the file named by 'fndn_ix'.
   Entries that are empty, implausible, or outside the r-x mapped
   area are sanitised or dropped. */
void ML_(addLineInfo) ( struct _DebugInfo* di,
                        UInt     fndn_ix,
                        Addr     this,
                        Addr     next,
                        Int      lineno,
                        Int      entry /* only needed for debug printing */
     )
{
   static const Bool debug = False;
   DiLoc loc;
   UWord size = next - this;

   /* Debug-print helper passed to COMPLAIN_ONCE below. */
#  define SHOWLINEINFO                                                  \
   VG_(message)(Vg_DebugMsg,                                            \
                "addLoc: addr %#lx, size %lu, line %d, fndn_ix %u\n",   \
                this,size,lineno,fndn_ix)

   /* Ignore zero-sized locs */
   if (this == next) return;

   if (debug) {
      FnDn *fndn = VG_(indexEltNumber) (di->fndnpool, fndn_ix);
      VG_(printf)( "  src ix %u %s %s line %d %#lx-%#lx\n",
                   fndn_ix,
                   fndn->dirname ? fndn->dirname : "(unknown)",
                   fndn->filename, lineno, this, next );
   }

   /* Maximum sanity checking.  Some versions of GNU as do a shabby
    * job with stabs entries; if anything looks suspicious, revert to
    * a size of 1.  This should catch the instruction of interest
    * (since if using asm-level debug info, one instruction will
    * correspond to one line, unlike with C-level debug info where
    * multiple instructions can map to the one line), but avoid
    * catching any other instructions bogusly. */
   if (this > next) {
      if (VG_(clo_verbosity) > 2) {
         VG_(message)(Vg_DebugMsg,
                      "warning: line info addresses out of order "
                      "at entry %d: 0x%lx 0x%lx\n", entry, this, next);
      }
      size = 1;
   }

   if (size > MAX_LOC_SIZE) {
      if (0)
         VG_(message)(Vg_DebugMsg,
                      "warning: line info address range too large "
                      "at entry %d: %lu\n", entry, size);
      size = 1;
   }

   /* At this point, we know that the original value for |size|, viz
      |next - this|, will only still be used in the case where
      |this| <u |next|, so it can't have underflowed.  Considering
      that and the three checks that follow it, the following must
      hold. */
   vg_assert(size >= 1);
   vg_assert(size <= MAX_LOC_SIZE);

   /* Rule out ones which are completely outside the r-x mapped area.
      See "Comment_Regarding_Text_Range_Checks" elsewhere in this file
      for background and rationale. */
   vg_assert(di->fsm.have_rx_map && di->fsm.rw_map_count);
   if (ML_(find_rx_mapping)(di, this, this + size - 1) == NULL) {
      if (0)
         VG_(message)(Vg_DebugMsg,
                      "warning: ignoring line info entry falling "
                      "outside current DebugInfo: %#lx %#lx %#lx %#lx\n",
                      di->text_avma,
                      di->text_avma + di->text_size,
                      this, this + size - 1);
      return;
   }

   /* Negative line numbers are rejected outright. */
   if (lineno < 0) {
      COMPLAIN_ONCE("line info entry", 0, "smaller", SHOWLINEINFO);
      return;
   }
   if (lineno > MAX_LINENO) {
      /* With --enable-lto, gcc 9 creates huge line numbers e.g. in the tool
         => only complain with some debug level. */
      if (VG_(debugLog_getLevel)() >= 1)
         COMPLAIN_ONCE("line info entry", MAX_LINENO, "greater", SHOWLINEINFO);
      return;
   }

   loc.addr      = this;
   loc.size      = (UShort)size;
   loc.lineno    = lineno;

   if (0) SHOWLINEINFO;

   addLoc ( di, &loc, fndn_ix );
#  undef SHOWLINEINFO
}
648 /* Add an inlined call info to the inlined call table.
650 static void addInl ( struct _DebugInfo* di, DiInlLoc* inl )
652 UInt new_sz, i;
653 DiInlLoc* new_tab;
655 /* empty inl should have been ignored earlier */
656 vg_assert(inl->addr_lo < inl->addr_hi);
658 if (di->inltab_used == di->inltab_size) {
659 new_sz = 2 * di->inltab_size;
660 if (new_sz == 0) new_sz = 500;
661 new_tab = ML_(dinfo_zalloc)( "di.storage.addInl.1",
662 new_sz * sizeof(DiInlLoc) );
663 if (di->inltab != NULL) {
664 for (i = 0; i < di->inltab_used; i++)
665 new_tab[i] = di->inltab[i];
666 ML_(dinfo_free)(di->inltab);
668 di->inltab = new_tab;
669 di->inltab_size = new_sz;
672 di->inltab[di->inltab_used] = *inl;
673 if (inl->addr_hi - inl->addr_lo > di->maxinl_codesz)
674 di->maxinl_codesz = inl->addr_hi - inl->addr_lo;
675 di->inltab_used++;
676 vg_assert(di->inltab_used <= di->inltab_size);
680 /* Resize the InlTab (inlined call table) to save memory, by removing
681 (and, potentially, allowing m_mallocfree to unmap) any unused space
682 at the end of the table. */
683 static void shrinkInlTab ( struct _DebugInfo* di )
685 UWord new_sz = di->inltab_used;
686 if (new_sz == di->inltab_size) return;
687 vg_assert(new_sz < di->inltab_size);
688 ML_(dinfo_shrink_block)( di->inltab, new_sz * sizeof(DiInlLoc));
689 di->inltab_size = new_sz;
/* Top-level place to call to add a addr-to-inlined fn info.
   Records that code in [addr_lo, addr_hi) results from inlining
   'inlinedfn', called from fndn_ix:lineno at nesting depth 'level'. */
void ML_(addInlInfo) ( struct _DebugInfo* di,
                       Addr addr_lo, Addr addr_hi,
                       const HChar* inlinedfn,
                       UInt fndn_ix,
                       Int lineno, UShort level)
{
   DiInlLoc inl;

   /* Debug-print helper passed to COMPLAIN_ONCE below. */
#  define SHOWLINEINFO                                                     \
   VG_(message) (Vg_DebugMsg,                                              \
                 "addInlInfo: fn %s inlined as addr_lo %#lx,addr_hi %#lx," \
                 "caller fndn_ix %u %s:%d\n",                              \
                 inlinedfn, addr_lo, addr_hi, fndn_ix,                     \
                 ML_(fndn_ix2filename) (di, fndn_ix), lineno)

   /* Similar paranoia as in ML_(addLineInfo). Unclear if needed. */
   if (addr_lo >= addr_hi) {
       if (VG_(clo_verbosity) > 2) {
          VG_(message)(Vg_DebugMsg,
                       "warning: inlined info addresses out of order "
                       "at: 0x%lx 0x%lx\n", addr_lo, addr_hi);
          SHOWLINEINFO;
       }
       /* Clamp to a minimal 1-byte range so the entry can be kept. */
       addr_hi = addr_lo + 1;
   }

   /* Negative line numbers are rejected outright. */
   if (lineno < 0) {
      COMPLAIN_ONCE ("inlined call info entry", 0, "smaller", SHOWLINEINFO);
      return;
   }
   if (lineno > MAX_LINENO) {
      /* With --enable-lto, gcc 9 creates huge line numbers e.g. in the tool
         => only complain with some debug level. */
      if (VG_(debugLog_getLevel)() >= 1)
         COMPLAIN_ONCE ("inlined call info entry", MAX_LINENO, "greater",
                        SHOWLINEINFO);
      return;
   }

   // code resulting from inlining of inlinedfn:
   inl.addr_lo   = addr_lo;
   inl.addr_hi   = addr_hi;
   inl.inlinedfn = inlinedfn;
   // caller:
   inl.fndn_ix   = fndn_ix;
   inl.lineno    = lineno;
   inl.level     = level;

   if (0) SHOWLINEINFO;

   addInl ( di, &inl );
#  undef SHOWLINEINFO
}
747 DiCfSI_m* ML_(get_cfsi_m) (const DebugInfo* di, UInt pos)
749 UInt cfsi_m_ix;
751 vg_assert(pos >= 0 && pos < di->cfsi_used);
752 switch (di->sizeof_cfsi_m_ix) {
753 case 1: cfsi_m_ix = ((UChar*) di->cfsi_m_ix)[pos]; break;
754 case 2: cfsi_m_ix = ((UShort*) di->cfsi_m_ix)[pos]; break;
755 case 4: cfsi_m_ix = ((UInt*) di->cfsi_m_ix)[pos]; break;
756 default: vg_assert(0);
758 if (cfsi_m_ix == 0)
759 return NULL; // cfi hole
760 else
761 return VG_(indexEltNumber) (di->cfsi_m_pool, cfsi_m_ix);
/* Top-level place to call to add a CFI summary record.  The supplied
   DiCfSI_m is copied (deduplicated into cfsi_m_pool).  Records wholly
   outside the r-x mapped area are dropped; records straddling a
   boundary are clipped to fit. */
void ML_(addDiCfSI) ( struct _DebugInfo* di,
                      Addr base, UInt len, DiCfSI_m* cfsi_m )
{
   static const Bool debug = False;
   UInt    new_sz;
   DiCfSI* new_tab;
   SSizeT  delta;
   DebugInfoMapping* map;
   DebugInfoMapping* map2;

   if (debug) {
      VG_(printf)("adding DiCfSI: ");
      ML_(ppDiCfSI)(di->cfsi_exprs, base, len, cfsi_m);
   }

   /* sanity */
   vg_assert(len > 0);
   /* Issue a warning if LEN is unexpectedly large (exceeds 5 million).
      The implication is you have a single procedure
      with more than 5 million bytes of code.  Which is pretty
      unlikely.  Either that, or the debuginfo reader is somehow
      broken.  5 million is of course arbitrary; but it's big enough
      to be bigger than the size of any plausible piece of code that
      would fall within a single procedure.  But occasionally it does
      happen (c.f. BZ #339542). */
   if (len >= 5000000)
      VG_(message)(Vg_DebugMsg,
                   "warning: DiCfSI %#lx .. %#lx is huge; length = %u (%s)\n",
                   base, base + len - 1, len, di->soname);

   vg_assert(di->fsm.have_rx_map && di->fsm.rw_map_count);
   /* Find mapping where at least one end of the CFSI falls into. */
   map  = ML_(find_rx_mapping)(di, base, base);
   map2 = ML_(find_rx_mapping)(di, base + len - 1,
                                   base + len - 1);
   if (map == NULL)
      map = map2;
   else if (map2 == NULL)
      map2 = map;

   /* Rule out ones which are completely outside the r-x mapped area
      (or which span across different areas).
      See "Comment_Regarding_Text_Range_Checks" elsewhere in this file
      for background and rationale. */
   if (map == NULL || map != map2) {
      /* Rate-limited complaint: at most 10 unless tracing is on. */
      static Int complaints = 10;
      if (VG_(clo_trace_cfi) || complaints > 0) {
         complaints--;
         if (VG_(clo_verbosity) > 1) {
            VG_(message)(
               Vg_DebugMsg,
               "warning: DiCfSI %#lx .. %#lx outside mapped rx segments (%s)\n",
               base,
               base + len - 1,
               di->soname
            );
         }
         if (VG_(clo_trace_cfi))
            ML_(ppDiCfSI)(di->cfsi_exprs, base, len, cfsi_m);
      }
      return;
   }

   /* Now we know the range is at least partially inside the r-x
      mapped area.  That implies that at least one of the ends of the
      range falls inside the area.  If necessary, clip it so it is
      completely within the area.  If we don't do this,
      check_CFSI_related_invariants() in debuginfo.c (invariant #2)
      will fail.  See
      "Comment_on_IMPORTANT_CFSI_REPRESENTATIONAL_INVARIANTS" in
      priv_storage.h for background. */
   if (base < map->avma) {
      /* Lower end is outside the mapped area.  Hence upper end must
         be inside it. */
      if (0) VG_(printf)("XXX truncate lower\n");
      vg_assert(base + len - 1 >= map->avma);
      delta = (SSizeT)(map->avma - base);
      vg_assert(delta > 0);
      vg_assert(delta < (SSizeT)len);
      base += delta;
      len -= delta;
   }
   else
   if (base + len - 1 > map->avma + map->size - 1) {
      /* Upper end is outside the mapped area.  Hence lower end must be
         inside it. */
      if (0) VG_(printf)("XXX truncate upper\n");
      vg_assert(base <= map->avma + map->size - 1);
      delta = (SSizeT)( (base + len - 1)
                        - (map->avma + map->size - 1) );
      vg_assert(delta > 0);
      vg_assert(delta < (SSizeT)len);
      len -= delta;
   }

   /* Final checks */

   /* Because: either cfsi was entirely inside the range, in which
      case we asserted that len > 0 at the start, OR it fell partially
      inside the range, in which case we reduced it by some size
      (delta) which is < its original size. */
   vg_assert(len > 0);

   /* Similar logic applies for the next two assertions. */
   vg_assert(base >= map->avma);
   vg_assert(base + len - 1
             <= map->avma + map->size - 1);

   /* Table full: grow it (doubling, starting at 20), and lazily
      create the dedup pool holding the DiCfSI_m records. */
   if (di->cfsi_used == di->cfsi_size) {
      new_sz = 2 * di->cfsi_size;
      if (new_sz == 0) new_sz = 20;
      new_tab = ML_(dinfo_zalloc)( "di.storage.addDiCfSI.1",
                                   new_sz * sizeof(DiCfSI) );
      if (di->cfsi_rd != NULL) {
         VG_(memcpy)(new_tab, di->cfsi_rd,
                     di->cfsi_used * sizeof(DiCfSI));
         ML_(dinfo_free)(di->cfsi_rd);
      }
      di->cfsi_rd = new_tab;
      di->cfsi_size = new_sz;
      if (di->cfsi_m_pool == NULL)
         di->cfsi_m_pool = VG_(newDedupPA)(1000 * sizeof(DiCfSI_m),
                                           vg_alignof(DiCfSI_m),
                                           ML_(dinfo_zalloc),
                                           "di.storage.DiCfSI_m_pool",
                                           ML_(dinfo_free));
   }

   di->cfsi_rd[di->cfsi_used].base = base;
   di->cfsi_rd[di->cfsi_used].len  = len;
   di->cfsi_rd[di->cfsi_used].cfsi_m_ix
      = VG_(allocFixedEltDedupPA)(di->cfsi_m_pool,
                                  sizeof(DiCfSI_m),
                                  cfsi_m);
   di->cfsi_used++;
   vg_assert(di->cfsi_used <= di->cfsi_size);
}
/* Constructors for CfiExpr nodes.  Each appends a zero-initialised
   node with the given tag/operands to 'dst' and returns its index;
   expressions are thus DAGs of indices within the XArray. */

/* Undefined-value leaf node. */
Int ML_(CfiExpr_Undef)( XArray* dst )
{
   CfiExpr e;
   VG_(memset)( &e, 0, sizeof(e) );
   e.tag = Cex_Undef;
   return (Int)VG_(addToXA)( dst, &e );
}

/* Memory dereference of the expression at index 'ixAddr'. */
Int ML_(CfiExpr_Deref)( XArray* dst, Int ixAddr )
{
   CfiExpr e;
   VG_(memset)( &e, 0, sizeof(e) );
   e.tag = Cex_Deref;
   e.Cex.Deref.ixAddr = ixAddr;
   return (Int)VG_(addToXA)( dst, &e );
}

/* Constant leaf node holding 'con'. */
Int ML_(CfiExpr_Const)( XArray* dst, UWord con )
{
   CfiExpr e;
   VG_(memset)( &e, 0, sizeof(e) );
   e.tag = Cex_Const;
   e.Cex.Const.con = con;
   return (Int)VG_(addToXA)( dst, &e );
}

/* Unary operation 'op' applied to the expression at index 'ix'. */
Int ML_(CfiExpr_Unop)( XArray* dst, CfiUnop op, Int ix )
{
   CfiExpr e;
   VG_(memset)( &e, 0, sizeof(e) );
   e.tag = Cex_Unop;
   e.Cex.Unop.op = op;
   e.Cex.Unop.ix = ix;
   return (Int)VG_(addToXA)( dst, &e );
}

/* Binary operation 'op' on the expressions at indices ixL and ixR. */
Int ML_(CfiExpr_Binop)( XArray* dst, CfiBinop op, Int ixL, Int ixR )
{
   CfiExpr e;
   VG_(memset)( &e, 0, sizeof(e) );
   e.tag = Cex_Binop;
   e.Cex.Binop.op  = op;
   e.Cex.Binop.ixL = ixL;
   e.Cex.Binop.ixR = ixR;
   return (Int)VG_(addToXA)( dst, &e );
}

/* Leaf node naming a Valgrind CFI register. */
Int ML_(CfiExpr_CfiReg)( XArray* dst, CfiReg reg )
{
   CfiExpr e;
   VG_(memset)( &e, 0, sizeof(e) );
   e.tag = Cex_CfiReg;
   e.Cex.CfiReg.reg = reg;
   return (Int)VG_(addToXA)( dst, &e );
}

/* Leaf node naming a DWARF register number. */
Int ML_(CfiExpr_DwReg)( XArray* dst, Int reg )
{
   CfiExpr e;
   VG_(memset)( &e, 0, sizeof(e) );
   e.tag = Cex_DwReg;
   e.Cex.DwReg.reg = reg;
   return (Int)VG_(addToXA)( dst, &e );
}
/* Print a CfiExpr unary operator. */
static void ppCfiUnop ( CfiUnop op )
{
   switch (op) {
      case Cunop_Abs: VG_(printf)("abs"); break;
      case Cunop_Neg: VG_(printf)("-"); break;
      case Cunop_Not: VG_(printf)("~"); break;
      default: vg_assert(0);
   }
}

/* Print a CfiExpr binary operator. */
static void ppCfiBinop ( CfiBinop op )
{
   switch (op) {
      case Cbinop_Add: VG_(printf)("+"); break;
      case Cbinop_Sub: VG_(printf)("-"); break;
      case Cbinop_And: VG_(printf)("&"); break;
      case Cbinop_Mul: VG_(printf)("*"); break;
      case Cbinop_Shl: VG_(printf)("<<"); break;
      case Cbinop_Shr: VG_(printf)(">>"); break;
      case Cbinop_Eq: VG_(printf)("=="); break;
      case Cbinop_Ge: VG_(printf)(">="); break;
      case Cbinop_Gt: VG_(printf)(">"); break;
      case Cbinop_Le: VG_(printf)("<="); break;
      case Cbinop_Lt: VG_(printf)("<"); break;
      case Cbinop_Ne: VG_(printf)("!="); break;
      default: vg_assert(0);
   }
}

/* Print the (architecture-specific) name of a CFI register. */
static void ppCfiReg ( CfiReg reg )
{
   switch (reg) {
      case Creg_INVALID:   VG_(printf)("Creg_INVALID"); break;
      case Creg_IA_SP:     VG_(printf)("xSP"); break;
      case Creg_IA_BP:     VG_(printf)("xBP"); break;
      case Creg_IA_IP:     VG_(printf)("xIP"); break;
      case Creg_ARM_R13:   VG_(printf)("R13"); break;
      case Creg_ARM_R12:   VG_(printf)("R12"); break;
      case Creg_ARM_R15:   VG_(printf)("R15"); break;
      case Creg_ARM_R14:   VG_(printf)("R14"); break;
      case Creg_ARM_R7:    VG_(printf)("R7"); break;
      case Creg_ARM64_SP:  VG_(printf)("SP"); break;
      case Creg_ARM64_X30: VG_(printf)("X30"); break;
      case Creg_ARM64_X29: VG_(printf)("X29"); break;
      case Creg_MIPS_RA:   VG_(printf)("RA"); break;
      case Creg_S390_IA:   VG_(printf)("IA"); break;
      case Creg_S390_SP:   VG_(printf)("SP"); break;
      case Creg_S390_FP:   VG_(printf)("FP"); break;
      case Creg_S390_LR:   VG_(printf)("LR"); break;
      default: vg_assert(0);
   }
}
/* Recursively print the CfiExpr rooted at index 'ix' of 'src'. */
void ML_(ppCfiExpr)( const XArray* src, Int ix )
{
   /* VG_(indexXA) checks for invalid src/ix values, so we can
      use it indiscriminately. */
   const CfiExpr* e = VG_(indexXA)( src, ix );
   switch (e->tag) {
      case Cex_Undef:
         VG_(printf)("Undef");
         break;
      case Cex_Deref:
         VG_(printf)("*(");
         ML_(ppCfiExpr)(src, e->Cex.Deref.ixAddr);
         VG_(printf)(")");
         break;
      case Cex_Const:
         VG_(printf)("0x%lx", e->Cex.Const.con);
         break;
      case Cex_Unop:
         ppCfiUnop(e->Cex.Unop.op);
         VG_(printf)("(");
         ML_(ppCfiExpr)(src, e->Cex.Unop.ix);
         VG_(printf)(")");
         break;
      case Cex_Binop:
         /* Fully parenthesise both operands to avoid ambiguity. */
         VG_(printf)("(");
         ML_(ppCfiExpr)(src, e->Cex.Binop.ixL);
         VG_(printf)(")");
         ppCfiBinop(e->Cex.Binop.op);
         VG_(printf)("(");
         ML_(ppCfiExpr)(src, e->Cex.Binop.ixR);
         VG_(printf)(")");
         break;
      case Cex_CfiReg:
         ppCfiReg(e->Cex.CfiReg.reg);
         break;
      case Cex_DwReg:
         VG_(printf)("dwr%d", e->Cex.DwReg.reg);
         break;
      default:
         VG_(core_panic)("ML_(ppCfiExpr)");
         /*NOTREACHED*/
         break;
   }
}
/* OSet comparator: compare an Addr key against a DiAddrRange element.
   Returns <0 / 0 / >0 as the key lies below / within / above the
   element's inclusive [aMin, aMax] range. */
Word ML_(cmp_for_DiAddrRange_range) ( const void* keyV,
                                      const void* elemV ) {
   const Addr* key = (const Addr*)keyV;
   const DiAddrRange* elem = (const DiAddrRange*)elemV;
   if (0)
      VG_(printf)("cmp_for_DiAddrRange_range: %#lx vs %#lx\n",
                  *key, elem->aMin);
   if ((*key) < elem->aMin) return -1;
   if ((*key) > elem->aMax) return 1;
   return 0;
}
/* Debug helper: print every DiAddrRange in 'scope', labelled with the
   caller-supplied tag 'who'.  Iterates the whole OSet in address
   order; used only from (normally disabled) diagnostic code. */
1075 static
1076 void show_scope ( OSet* /* of DiAddrRange */ scope, const HChar* who )
1078 DiAddrRange* range;
1079 VG_(printf)("Scope \"%s\" = {\n", who);
1080 VG_(OSetGen_ResetIter)( scope );
1081 while (True) {
1082 range = VG_(OSetGen_Next)( scope );
1083 if (!range) break;
1084 VG_(printf)(" %#lx .. %#lx: %ld vars\n", range->aMin, range->aMax,
1085 range->vars ? VG_(sizeXA)(range->vars) : 0);
1087 VG_(printf)("}\n");
1090 /* Add the variable 'var' to 'scope' for the address range [aMin,aMax]
1091 (inclusive of aMin and aMax). Split existing ranges as required if
1092 aMin or aMax or both don't match existing range boundaries, and add
1093 'var' to all required ranges. Take great care to preserve the
1094 invariant that the ranges in 'scope' cover the entire address range
1095 exactly once, with no overlaps and no holes. */
1096 static void add_var_to_arange (
1097 /*MOD*/OSet* /* of DiAddrRange */ scope,
1098 Addr aMin,
1099 Addr aMax,
1100 DiVariable* var
1103 DiAddrRange *first, *last, *range;
1104 /* These xx variables are for assertion checking only; they don't
1105 contribute anything to the actual work of this function. */
1106 DiAddrRange *xxRangep, *xxFirst, *xxLast;
1107 UWord xxIters;
1109 vg_assert(aMin <= aMax);
1111 if (0) VG_(printf)("add_var_to_arange: %#lx .. %#lx\n", aMin, aMax);
1112 if (0) show_scope( scope, "add_var_to_arange(1)" );
1114 /* See if the lower end of the range (aMin) falls exactly on an
1115 existing range boundary. If not, find the range it does fall
1116 into, and split it (copying the variables in the process), so
1117 that aMin does exactly fall on a range boundary. */
1118 first = VG_(OSetGen_Lookup)( scope, &aMin );
1119 /* It must be present, since the presented OSet must cover
1120 the entire address range. */
1121 vg_assert(first);
1122 vg_assert(first->aMin <= first->aMax);
1123 vg_assert(first->aMin <= aMin && aMin <= first->aMax);
1125 /* Fast track common case, which is that the range specified for
1126 the variable exactly coincides with one already-existing
1127 range. */
1128 if (first->aMin == aMin && first->aMax == aMax) {
1129 vg_assert(first->vars);
1130 VG_(addToXA)( first->vars, var );
1131 return;
1134 /* We have to get into splitting ranges, which is complex
1135 and slow. */
1136 if (first->aMin < aMin) {
1137 DiAddrRange* nyu;
1138 /* Ok. We'll have to split 'first'. */
1139 /* truncate the upper end of 'first' */
1140 Addr tmp = first->aMax;
1141 first->aMax = aMin-1;
1142 vg_assert(first->aMin <= first->aMax);
1143 /* create a new range */
1144 nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
1145 nyu->aMin = aMin;
1146 nyu->aMax = tmp;
1147 vg_assert(nyu->aMin <= nyu->aMax);
1148 /* copy vars into it */
1149 vg_assert(first->vars);
1150 nyu->vars = VG_(cloneXA)( "di.storage.avta.1", first->vars );
1151 VG_(OSetGen_Insert)( scope, nyu );
1152 first = nyu;
1155 vg_assert(first->aMin == aMin);
1157 /* Now do exactly the same for the upper end (aMax): if it doesn't
1158 fall on a boundary, cause it to do so by splitting the range it
1159 does currently fall into. */
1160 last = VG_(OSetGen_Lookup)( scope, &aMax );
1161 vg_assert(last->aMin <= last->aMax);
1162 vg_assert(last->aMin <= aMax && aMax <= last->aMax);
1164 if (aMax < last->aMax) {
1165 DiAddrRange* nyu;
1166 /* We have to split 'last'. */
1167 /* truncate the lower end of 'last' */
1168 Addr tmp = last->aMin;
1169 last->aMin = aMax+1;
1170 vg_assert(last->aMin <= last->aMax);
1171 /* create a new range */
1172 nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
1173 nyu->aMin = tmp;
1174 nyu->aMax = aMax;
1175 vg_assert(nyu->aMin <= nyu->aMax);
1176 /* copy vars into it */
1177 vg_assert(last->vars);
1178 nyu->vars = VG_(cloneXA)( "di.storage.avta.2", last->vars );
1179 VG_(OSetGen_Insert)( scope, nyu );
1180 last = nyu;
1183 vg_assert(aMax == last->aMax);
1185 xxFirst = (DiAddrRange*)VG_(OSetGen_Lookup)(scope, &aMin);
1186 xxLast = (DiAddrRange*)VG_(OSetGen_Lookup)(scope, &aMax);
1187 vg_assert(xxFirst);
1188 vg_assert(xxLast);
1189 vg_assert(xxFirst->aMin == aMin);
1190 vg_assert(xxLast->aMax == aMax);
1191 if (xxFirst != xxLast)
1192 vg_assert(xxFirst->aMax < xxLast->aMin);
1194 /* Great. Now we merely need to iterate over the segments from
1195 'first' to 'last' inclusive, and add 'var' to the variable set
1196 of each of them. */
1197 if (0) {
1198 static UWord ctr = 0;
1199 ctr++;
1200 VG_(printf)("ctr = %lu\n", ctr);
1201 if (ctr >= 33263) show_scope( scope, "add_var_to_arange(2)" );
1204 xxIters = 0;
1205 range = xxRangep = NULL;
/* Walk ranges in address order starting at aMin; xxRangep trails one
   step behind 'range' so adjacency of consecutive ranges can be
   asserted. */
1206 VG_(OSetGen_ResetIterAt)( scope, &aMin );
1207 while (True) {
1208 xxRangep = range;
1209 range = VG_(OSetGen_Next)( scope );
1210 if (!range) break;
1211 if (range->aMin > aMax) break;
1212 xxIters++;
1213 if (0) VG_(printf)("have range %#lx %#lx\n",
1214 range->aMin, range->aMax);
1216 /* Sanity checks */
1217 if (!xxRangep) {
1218 /* This is the first in the range */
1219 vg_assert(range->aMin == aMin);
1220 } else {
1221 vg_assert(xxRangep->aMax + 1 == range->aMin);
1224 vg_assert(range->vars);
1225 VG_(addToXA)( range->vars, var );
1227 /* Done. We should have seen at least one range. */
1228 vg_assert(xxIters >= 1);
1229 if (xxIters == 1) vg_assert(xxFirst == xxLast);
1230 if (xxFirst == xxLast) vg_assert(xxIters == 1);
1231 vg_assert(xxRangep);
1232 vg_assert(xxRangep->aMax == aMax);
1233 vg_assert(xxRangep == xxLast);
1237 /* Top-level place to call to add a variable description (as extracted
1238 from a DWARF3 .debug_info section. */
/* Validates the variable's type and text range, lazily creates the
   per-level scope OSets in di->varinfo, and finally hands the work of
   range splitting/insertion to add_var_to_arange. */
1239 void ML_(addVar)( struct _DebugInfo* di,
1240 Int level,
1241 Addr aMin,
1242 Addr aMax,
1243 const HChar* name, /* in di's .strpool */
1244 UWord typeR, /* a cuOff */
1245 const GExpr* gexpr,
1246 const GExpr* fbGX,
1247 UInt fndn_ix, /* where decl'd - may be zero.
1248 index in in di's .fndnpool */
1249 Int lineNo, /* where decl'd - may be zero */
1250 Bool show )
1252 OSet* /* of DiAddrRange */ scope;
1253 DiVariable var;
1254 Bool all;
1255 TyEnt* ent;
1256 MaybeULong mul;
1257 const HChar* badness;
1259 vg_assert(di && di->admin_tyents);
1261 if (0) {
1262 VG_(printf)(" ML_(addVar): level %d %#lx-%#lx %s :: ",
1263 level, aMin, aMax, name );
1264 ML_(pp_TyEnt_C_ishly)( di->admin_tyents, typeR );
1265 VG_(printf)("\n Var=");
1266 ML_(pp_GX)(gexpr);
1267 VG_(printf)("\n");
1268 if (fbGX) {
1269 VG_(printf)(" FrB=");
1270 ML_(pp_GX)( fbGX );
1271 VG_(printf)("\n");
1272 } else {
1273 VG_(printf)(" FrB=none\n");
1275 VG_(printf)("\n");
1278 vg_assert(level >= 0);
1279 vg_assert(aMin <= aMax);
1280 vg_assert(name);
1281 vg_assert(gexpr);
1283 ent = ML_(TyEnts__index_by_cuOff)( di->admin_tyents, NULL, typeR);
1284 vg_assert(ent);
1285 vg_assert(ML_(TyEnt__is_type)(ent));
1287 /* "Comment_Regarding_Text_Range_Checks" (is referred to elsewhere)
1288 ----------------------------------------------------------------
1289 Ignore any variables whose aMin .. aMax (that is, range of text
1290 addresses for which they actually exist) falls outside the text
1291 segment. Is this indicative of a bug in the reader? Maybe.
1292 (LATER): instead of restricting strictly to the .text segment,
1293 be a bit more relaxed, and accept any variable whose text range
1294 falls inside the r-x mapped area. This is useful because .text
1295 is not always the only instruction-carrying segment: others are:
1296 .init .plt __libc_freeres_fn and .fini. This implicitly assumes
1297 that those extra sections have the same bias as .text, but that
1298 seems a reasonable assumption to me. */
1299 /* This is assured us by top level steering logic in debuginfo.c,
1300 and it is re-checked at the start of
1301 ML_(read_elf_debug_info). */
1302 vg_assert(di->fsm.have_rx_map && di->fsm.rw_map_count);
1303 if (level > 0 && ML_(find_rx_mapping)(di, aMin, aMax) == NULL) {
1304 if (VG_(clo_verbosity) > 1) {
1305 VG_(message)(Vg_DebugMsg,
1306 "warning: addVar: in range %#lx .. %#lx outside "
1307 "all rx mapped areas (%s)\n",
1308 aMin, aMax, name
1311 return;
1314 /* If the type's size is zero (which can mean unknown size), ignore
1315 it. We will never be able to actually relate a data address to
1316 a data object with zero size, so there's no point in storing
1317 info on it. On 32-bit platforms, also reject types whose size
1318 is 2^32 bytes or large. (It's amazing what junk shows up ..) */
1319 mul = ML_(sizeOfType)(di->admin_tyents, typeR);
1321 badness = NULL;
1322 if (mul.b != True)
1323 badness = "unknown size";
1324 else if (mul.ul == 0)
1325 badness = "zero size ";
1326 else if (sizeof(void*) == 4 && mul.ul >= (1ULL<<32))
1327 badness = "implausibly large";
1329 if (badness) {
/* Rate-limit the complaints so a broken object doesn't flood the log. */
1330 static Int complaints = 10;
1331 if (VG_(clo_verbosity) >= 2 && complaints > 0) {
1332 VG_(message)(Vg_DebugMsg, "warning: addVar: %s (%s)\n",
1333 badness, name );
1334 complaints--;
1336 return;
1339 if (!di->varinfo) {
1340 di->varinfo = VG_(newXA)( ML_(dinfo_zalloc),
1341 "di.storage.addVar.1",
1342 ML_(dinfo_free),
1343 sizeof(OSet*) );
1346 vg_assert(level < 256); /* arbitrary; stay sane */
1347 /* Expand the top level array enough to map this level */
1348 while ( VG_(sizeXA)(di->varinfo) <= level ) {
1349 DiAddrRange* nyu;
1350 scope = VG_(OSetGen_Create)( offsetof(DiAddrRange,aMin),
1351 ML_(cmp_for_DiAddrRange_range),
1352 ML_(dinfo_zalloc), "di.storage.addVar.2",
1353 ML_(dinfo_free) );
1354 if (0) VG_(printf)("create: scope = %p, adding at %ld\n",
1355 scope, VG_(sizeXA)(di->varinfo));
1356 VG_(addToXA)( di->varinfo, &scope );
1357 /* Add a single range covering the entire address space. At
1358 level 0 we require this doesn't get split. At levels above 0
1359 we require that any additions to it cause it to get split.
1360 All of these invariants get checked both add_var_to_arange
1361 and after reading is complete, in canonicaliseVarInfo. */
1362 nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
1363 nyu->aMin = (Addr)0;
1364 nyu->aMax = ~(Addr)0;
1365 nyu->vars = VG_(newXA)( ML_(dinfo_zalloc), "di.storage.addVar.3",
1366 ML_(dinfo_free),
1367 sizeof(DiVariable) );
1368 VG_(OSetGen_Insert)( scope, nyu );
1371 vg_assert( VG_(sizeXA)(di->varinfo) > level );
1372 scope = *(OSet**)VG_(indexXA)( di->varinfo, level );
1373 vg_assert(scope);
1375 var.name = name;
1376 var.typeR = typeR;
1377 var.gexpr = gexpr;
1378 var.fbGX = fbGX;
1379 var.fndn_ix = fndn_ix;
1380 var.lineNo = lineNo;
/* A global (level-0) variable must span the whole address space;
   a local one must not. */
1382 all = aMin == (Addr)0 && aMax == ~(Addr)0;
1383 vg_assert(level == 0 ? all : !all);
1385 add_var_to_arange( /*MOD*/scope, aMin, aMax, &var );
1389 /* This really just checks the constructed data structure, as there is
1390 no canonicalisation to do. */
/* Verifies, for every scope level in di->varinfo, that the ranges
   cover the whole address space contiguously with no gaps or
   overlaps, and that the special whole-space invariants for level 0
   (exactly one range) hold. */
1391 static void canonicaliseVarInfo ( struct _DebugInfo* di )
1393 Word i, nInThisScope;
1395 if (!di->varinfo)
1396 return;
1398 for (i = 0; i < VG_(sizeXA)(di->varinfo); i++) {
1400 DiAddrRange *range, *rangep;
1401 OSet* scope = *(OSet**)VG_(indexXA)(di->varinfo, i);
1402 if (!scope) continue;
1404 /* Deal with the global-scope case. */
1405 if (i == 0) {
1406 Addr zero = 0;
1407 vg_assert(VG_(OSetGen_Size)( scope ) == 1);
1408 range = VG_(OSetGen_Lookup)( scope, &zero );
1409 vg_assert(range);
1410 vg_assert(range->aMin == (Addr)0);
1411 vg_assert(range->aMax == ~(Addr)0);
1412 continue;
1415 /* All the rest of this is for the local-scope case. */
1416 /* iterate over all entries in 'scope' */
1417 nInThisScope = 0;
1418 rangep = NULL;
1419 VG_(OSetGen_ResetIter)(scope);
1420 while (True) {
1421 range = VG_(OSetGen_Next)(scope);
1422 if (!range) {
1423 /* We just saw the last one. There must have been at
1424 least one entry in the range. */
1425 vg_assert(rangep);
1426 vg_assert(rangep->aMax == ~(Addr)0);
1427 break;
1430 vg_assert(range->aMin <= range->aMax);
1431 vg_assert(range->vars);
1433 if (!rangep) {
1434 /* This is the first entry in the range. */
1435 vg_assert(range->aMin == 0);
1436 } else {
1437 vg_assert(rangep->aMax + 1 == range->aMin);
1440 rangep = range;
1441 nInThisScope++;
1442 } /* iterating over ranges in a given scope */
1444 /* If there's only one entry in this (local) scope, it must
1445 cover the entire address space (obviously), but it must not
1446 contain any vars. */
1448 vg_assert(nInThisScope > 0);
1449 if (nInThisScope == 1) {
1450 Addr zero = 0;
1451 vg_assert(VG_(OSetGen_Size)( scope ) == 1);
1452 range = VG_(OSetGen_Lookup)( scope, &zero );
1453 vg_assert(range);
1454 vg_assert(range->aMin == (Addr)0);
1455 vg_assert(range->aMax == ~(Addr)0);
1456 vg_assert(range->vars);
1457 vg_assert(VG_(sizeXA)(range->vars) == 0);
1460 } /* iterate over scopes */
1464 /*------------------------------------------------------------*/
1465 /*--- Canonicalisers ---*/
1466 /*------------------------------------------------------------*/
1468 /* Sort the symtab by starting address, and emit warnings if any
1469 symbols have overlapping address ranges. We use that old chestnut,
1470 shellsort. Mash the table around so as to establish the property
1471 that addresses are in order and the ranges to not overlap. This
1472 facilitates using binary search to map addresses to symbols when we
1473 come to query the table.
/* ssort comparator for DiSym: order by main avma only, so symbols at
   the same address compare equal (ties resolved by later merging). */
1475 static Int compare_DiSym ( const void* va, const void* vb )
1477 const DiSym* a = va;
1478 const DiSym* b = vb;
1479 if (a->avmas.main < b->avmas.main) return -1;
1480 if (a->avmas.main > b->avmas.main) return 1;
1481 return 0;
1485 /* An address is associated with more than one name. Which do we
1486 prefer as the "display" name (that we show the user in stack
1487 traces)? In order:
1489 - Prefer "PMPI_<foo>" over "MPI_<foo>".
1491 - Else, prefer a non-empty name over an empty one.
1493 - Else, prefer a non-whitespace name over an all-whitespace name.
1495 - Else, prefer the shorter symbol name. If the symbol contains a
1496 version symbol ('@' on Linux, other platforms may differ), which means it
1497 is versioned, then the length up to the version symbol is used for length
1498 comparison purposes (so "foo@GLIBC_2.4.2" is considered shorter than
1499 "foobar").
1501 - Else, if two symbols have the same length, prefer a versioned symbol over
1502 a non-versioned symbol.
1504 - Else, use alphabetical ordering.
1506 - Otherwise, they must be the same; use the name with the lower address.
1508 Very occasionally this goes wrong (eg. 'memcmp' and 'bcmp' are
1509 aliases in glibc, we choose the 'bcmp' symbol because it's shorter,
1510 so we can misdescribe memcmp() as bcmp()). This is hard to avoid.
1511 It's mentioned in the FAQ file.
1513 Returned value is True if a_name is preferred, False if b_name is
1514 preferred.
1516 static
1517 Bool preferName ( const DebugInfo* di,
1518 const HChar* a_name, const HChar* b_name,
1519 Addr sym_avma/*exposition only*/ )
1521 Word cmp;
1522 Word vlena, vlenb; /* length without version */
1523 const HChar *vpa, *vpb;
1525 Bool preferA = False;
1526 Bool preferB = False;
1528 vg_assert(a_name);
1529 vg_assert(b_name);
1530 // vg_assert(a_name != b_name);
1531 // ???? now the pointers can be equal but is that
1532 // ???? not the indication of a latent bug ????
1534 vlena = VG_(strlen)(a_name);
1535 vlenb = VG_(strlen)(b_name);
/* The symbol-version separator is OS-specific. */
1537 # if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_freebsd)
1538 # define VERSION_CHAR '@'
1539 # elif defined(VGO_darwin)
1540 # define VERSION_CHAR '$'
1541 # else
1542 # error Unknown OS
1543 # endif
1545 vpa = VG_(strchr)(a_name, VERSION_CHAR);
1546 vpb = VG_(strchr)(b_name, VERSION_CHAR);
1548 # undef VERSION_CHAR
/* If versioned, compare lengths only up to the version separator. */
1550 if (vpa)
1551 vlena = vpa - a_name;
1552 if (vpb)
1553 vlenb = vpb - b_name;
1555 /* MPI hack: prefer PMPI_Foo over MPI_Foo */
1556 if (0==VG_(strncmp)(a_name, "MPI_", 4)
1557 && 0==VG_(strncmp)(b_name, "PMPI_", 5)
1558 && 0==VG_(strcmp)(a_name, 1+b_name)) {
1559 preferB = True; goto out;
1561 if (0==VG_(strncmp)(b_name, "MPI_", 4)
1562 && 0==VG_(strncmp)(a_name, "PMPI_", 5)
1563 && 0==VG_(strcmp)(b_name, 1+a_name)) {
1564 preferA = True; goto out;
1567 /* Prefer non-empty name. */
1568 if (vlena && !vlenb) {
1569 preferA = True; goto out;
1571 if (vlenb && !vlena) {
1572 preferB = True; goto out;
1575 /* Prefer non-whitespace name. */
1577 Bool blankA = True;
1578 Bool blankB = True;
1579 const HChar *s;
1580 s = a_name;
1581 while (*s) {
1582 if (!VG_(isspace)(*s++)) {
1583 blankA = False;
1584 break;
1587 s = b_name;
1588 while (*s) {
1589 if (!VG_(isspace)(*s++)) {
1590 blankB = False;
1591 break;
1595 if (!blankA && blankB) {
1596 preferA = True; goto out;
1598 if (!blankB && blankA) {
1599 preferB = True; goto out;
1603 /* Select the shortest unversioned name */
1604 if (vlena < vlenb) {
1605 preferA = True; goto out;
1607 if (vlenb < vlena) {
1608 preferB = True; goto out;
1611 /* Equal lengths; select the versioned name */
1612 if (vpa && !vpb) {
1613 preferA = True; goto out;
1615 if (vpb && !vpa) {
1616 preferB = True; goto out;
1619 /* Either both versioned or neither is versioned; select them
1620 alphabetically */
1621 cmp = VG_(strcmp)(a_name, b_name);
1622 if (cmp < 0) {
1623 preferA = True; goto out;
1625 if (cmp > 0) {
1626 preferB = True; goto out;
1629 /* If we get here, they are the same name. */
1631 /* In this case we could choose either (arbitrarily), but might as
1632 well choose the one with the lowest DiSym* address, so as to try
1633 and make the comparison mechanism more stable (a la sorting
1634 parlance). Also, skip the diagnostic printing in this case. */
1635 return a_name <= b_name ? True : False;
1637 /*NOTREACHED*/
1638 vg_assert(0);
1639 out:
1640 if (preferA && !preferB) {
1641 TRACE_SYMTAB("sym at %#lx: prefer '%s' to '%s'\n",
1642 sym_avma, a_name, b_name );
1643 return True;
1645 if (preferB && !preferA) {
1646 TRACE_SYMTAB("sym at %#lx: prefer '%s' to '%s'\n",
1647 sym_avma, b_name, a_name );
1648 return False;
1650 /*NOTREACHED*/
1651 vg_assert(0);
1655 /* Add the names in FROM to the names in TO. */
/* Builds a fresh NULL-terminated secondary-name vector for 'to'
   containing to's existing secondaries, then from's primary, then
   from's secondaries; frees to's old vector if it had one.  'from'
   itself is not modified. */
1656 static
1657 void add_DiSym_names_to_from ( const DebugInfo* di, DiSym* to,
1658 const DiSym* from )
1660 vg_assert(to->pri_name);
1661 vg_assert(from->pri_name);
1662 /* Figure out how many names there will be in the new combined
1663 secondary vector. */
1664 const HChar** to_sec = to->sec_names;
1665 const HChar** from_sec = from->sec_names;
/* Starts at 1 to account for from's primary name. */
1666 Word n_new_sec = 1;
1667 if (from_sec) {
1668 while (*from_sec) {
1669 n_new_sec++;
1670 from_sec++;
1673 if (to_sec) {
1674 while (*to_sec) {
1675 n_new_sec++;
1676 to_sec++;
1679 if (0)
1680 TRACE_SYMTAB("merge: -> %ld\n", n_new_sec);
1681 /* Create the new sec and copy stuff into it, putting the new
1682 entries at the end. */
1683 const HChar** new_sec = ML_(dinfo_zalloc)( "di.storage.aDntf.1",
1684 (n_new_sec+1) * sizeof(HChar*) )
1685 from_sec = from->sec_names;
1686 to_sec = to->sec_names;
1687 Word i = 0;
1688 if (to_sec) {
1689 while (*to_sec) {
1690 new_sec[i++] = *to_sec;
1691 to_sec++;
1694 new_sec[i++] = from->pri_name;
1695 if (from_sec) {
1696 while (*from_sec) {
1697 new_sec[i++] = *from_sec;
1698 from_sec++;
1701 vg_assert(i == n_new_sec);
/* zalloc guarantees the terminating NULL slot. */
1702 vg_assert(new_sec[i] == NULL);
1703 /* If we're replacing an existing secondary vector, free it. */
1704 if (to->sec_names) {
1705 ML_(dinfo_free)(to->sec_names);
1707 to->sec_names = new_sec;
/* Canonicalise di->symtab: sort by address, merge entries with
   identical address ranges (combining their name sets), truncate
   overlapping ranges, and finally choose each symbol's preferred
   primary name.  Establishes the sorted / non-overlapping invariants
   needed for binary-search queries. */
1711 static void canonicaliseSymtab ( struct _DebugInfo* di )
1713 Word i, j, n_truncated;
1714 Addr sta1, sta2, end1, end2, toc1, toc2;
1715 const HChar *pri1, *pri2, **sec1, **sec2;
1716 Bool ist1, ist2, isf1, isf2, isg1, isg2;
1718 # define SWAP(ty,aa,bb) \
1719 do { ty tt = (aa); (aa) = (bb); (bb) = tt; } while (0)
1721 if (di->symtab_used == 0)
1722 return;
1724 /* Check initial invariants */
1725 for (i = 0; i < di->symtab_used; i++) {
1726 DiSym* sym = &di->symtab[i];
1727 vg_assert(sym->pri_name);
1728 vg_assert(!sym->sec_names);
1731 /* Sort by address. */
1732 VG_(ssort)(di->symtab, di->symtab_used,
1733 sizeof(*di->symtab), compare_DiSym);
1735 cleanup_more:
1737 /* BEGIN Detect and "fix" identical address ranges. */
1738 while (1) {
1739 Word r, w, n_merged;
1740 n_merged = 0;
1741 w = 0;
1742 /* A pass merging entries together in the case where they agree
1743 on .isText -- that is, either: both are .isText or both are
1744 not .isText. They are merged into a single entry, but both
1745 sets of names are preserved, so we end up knowing all the
1746 names for that particular address range.*/
1747 for (r = 1; r < di->symtab_used; r++) {
1748 vg_assert(w < r);
1749 if ( di->symtab[w].avmas.main == di->symtab[r].avmas.main
1750 && di->symtab[w].size == di->symtab[r].size
1751 && !!di->symtab[w].isText == !!di->symtab[r].isText) {
1752 /* merge the two into one */
1753 n_merged++;
1754 /* Add r names to w if r has secondary names
1755 or r and w primary names differ. */
1756 if (di->symtab[r].sec_names
1757 || (0 != VG_(strcmp)(di->symtab[r].pri_name,
1758 di->symtab[w].pri_name))) {
1759 add_DiSym_names_to_from(di, &di->symtab[w], &di->symtab[r]);
1761 /* mark w as an IFunc if either w or r are */
1762 di->symtab[w].isIFunc = di->symtab[w].isIFunc || di->symtab[r].isIFunc;
1763 /* likewise for global symbols */
1764 di->symtab[w].isGlobal = di->symtab[w].isGlobal || di->symtab[r].isGlobal;
1765 /* and use ::pri_names to indicate this slot is no longer in use */
1766 di->symtab[r].pri_name = NULL;
1767 if (di->symtab[r].sec_names) {
1768 ML_(dinfo_free)(di->symtab[r].sec_names);
1769 di->symtab[r].sec_names = NULL;
1771 /* Completely zap the entry -- paranoia to make it more
1772 likely we'll notice if we inadvertently use it
1773 again. */
1774 VG_(memset)(&di->symtab[r], 0, sizeof(DiSym));
1775 } else {
1776 w = r;
1780 /* A second pass merging entries together where one .isText but
1781 the other isn't. In such cases, just ignore the non-.isText
1782 one (a heuristic hack.) */
1783 for (r = 1; r < di->symtab_used; r++) {
1784 /* Either of the symbols might already have been zapped by
1785 the previous pass, so we first have to check that. */
1786 if (di->symtab[r-1].pri_name == NULL) continue;
1787 if (di->symtab[r-0].pri_name == NULL) continue;
1788 /* ok, they are both still valid. Identical address ranges? */
1789 if (di->symtab[r-1].avmas.main != di->symtab[r-0].avmas.main) continue;
1790 if (di->symtab[r-1].size != di->symtab[r-0].size) continue;
1791 /* Identical address ranges. They must disagree on .isText
1792 since if they agreed, the previous pass would have merged
1793 them. */
1794 if (di->symtab[r-1].isText && di->symtab[r-0].isText) vg_assert(0);
1795 if (!di->symtab[r-1].isText && !di->symtab[r-0].isText) vg_assert(0);
1796 Word to_zap = di->symtab[r-1].isText ? (r-0) : (r-1);
1797 Word to_keep = di->symtab[r-1].isText ? (r-1) : (r-0);
1798 vg_assert(!di->symtab[to_zap].isText);
1799 vg_assert(di->symtab[to_keep].isText);
1800 /* Add to_zap's names to to_keep if to_zap has secondary names
1801 or to_zap's and to_keep's primary names differ. */
1802 if (di->symtab[to_zap].sec_names
1803 || (0 != VG_(strcmp)(di->symtab[to_zap].pri_name,
1804 di->symtab[to_keep].pri_name))) {
1805 add_DiSym_names_to_from(di, &di->symtab[to_keep],
1806 &di->symtab[to_zap]);
1808 /* Mark to_zap as not-in use in the same way as in the
1809 previous loop. */
1810 di->symtab[to_zap].pri_name = NULL;
1811 if (di->symtab[to_zap].sec_names) {
1812 ML_(dinfo_free)(di->symtab[to_zap].sec_names);
1813 di->symtab[to_zap].sec_names = NULL;
1815 VG_(memset)(&di->symtab[to_zap], 0, sizeof(DiSym));
1816 n_merged++;
1819 TRACE_SYMTAB( "canonicaliseSymtab: %ld symbols merged\n", n_merged);
1820 if (n_merged == 0)
1821 break;
1822 /* Now a pass to squeeze out any unused ones */
1823 w = 0;
1824 for (r = 0; r < di->symtab_used; r++) {
1825 vg_assert(w <= r);
1826 if (di->symtab[r].pri_name == NULL)
1827 continue;
1828 if (w < r) {
1829 di->symtab[w] = di->symtab[r];
1831 w++;
1833 vg_assert(w + n_merged == di->symtab_used);
1834 di->symtab_used = w;
1835 } /* while (1) */
1836 /* END Detect and "fix" identical address ranges. */
1838 /* BEGIN Detect and "fix" overlapping address ranges. */
1839 n_truncated = 0;
1841 for (i = 0; i < ((Word)di->symtab_used) -1; i++) {
1843 vg_assert(di->symtab[i].avmas.main <= di->symtab[i+1].avmas.main);
1845 /* Check for common (no overlap) case. */
1846 if (di->symtab[i].avmas.main + di->symtab[i].size
1847 <= di->symtab[i+1].avmas.main)
1848 continue;
1850 /* There's an overlap. Truncate one or the other. */
1851 if (di->trace_symtab) {
1852 VG_(printf)("overlapping address ranges in symbol table\n\t");
1853 ML_(ppSym)( i, &di->symtab[i] );
1854 VG_(printf)("\t");
1855 ML_(ppSym)( i+1, &di->symtab[i+1] );
1856 VG_(printf)("\n");
1859 /* Truncate one or the other. */
1860 sta1 = di->symtab[i].avmas.main;
1861 end1 = sta1 + di->symtab[i].size - 1;
1862 toc1 = GET_TOCPTR_AVMA(di->symtab[i].avmas);
1863 // aren't we missing local_ep here ????
1864 pri1 = di->symtab[i].pri_name;
1865 sec1 = di->symtab[i].sec_names;
1866 ist1 = di->symtab[i].isText;
1867 isf1 = di->symtab[i].isIFunc;
1868 isg1 = di->symtab[i].isGlobal;
1870 sta2 = di->symtab[i+1].avmas.main;
1871 end2 = sta2 + di->symtab[i+1].size - 1;
1872 toc2 = GET_TOCPTR_AVMA(di->symtab[i+1].avmas);
1873 // aren't we missing local_ep here ????
1874 pri2 = di->symtab[i+1].pri_name;
1875 sec2 = di->symtab[i+1].sec_names;
1876 ist2 = di->symtab[i+1].isText;
1877 isf2 = di->symtab[i+1].isIFunc;
1878 isg2 = di->symtab[i+1].isGlobal;
1880 if (sta1 < sta2) {
1881 end1 = sta2 - 1;
1882 } else {
1883 vg_assert(sta1 == sta2);
1884 if (end1 > end2) {
1885 sta1 = end2 + 1;
1886 SWAP(Addr,sta1,sta2); SWAP(Addr,end1,end2); SWAP(Addr,toc1,toc2);
1887 SWAP(const HChar*,pri1,pri2); SWAP(const HChar**,sec1,sec2);
1888 SWAP(Bool,ist1,ist2); SWAP(Bool,isf1,isf2); SWAP(Bool, isg1, isg2);
1889 } else
1890 if (end1 < end2) {
1891 sta2 = end1 + 1;
1892 } else {
1893 /* end1 == end2. Identical addr ranges. We'll eventually wind
1894 up back at cleanup_more, which will take care of it. */
1897 di->symtab[i].avmas.main = sta1;
1898 di->symtab[i].size = end1 - sta1 + 1;
1899 SET_TOCPTR_AVMA(di->symtab[i].avmas, toc1);
1900 // missing local_ep ???
1901 di->symtab[i].pri_name = pri1;
1902 di->symtab[i].sec_names = sec1;
1903 di->symtab[i].isText = ist1;
1904 di->symtab[i].isIFunc = isf1;
1905 di->symtab[i].isGlobal = isg1;
1907 di->symtab[i+1].avmas.main = sta2;
1908 di->symtab[i+1].size = end2 - sta2 + 1;
1909 SET_TOCPTR_AVMA(di->symtab[i+1].avmas, toc2);
1910 // missing local_ep ???
1911 di->symtab[i+1].pri_name = pri2;
1912 di->symtab[i+1].sec_names = sec2;
1913 di->symtab[i+1].isText = ist2;
1914 di->symtab[i+1].isIFunc = isf2;
1915 di->symtab[i+1].isGlobal = isg2;
1917 vg_assert(sta1 <= sta2);
1918 vg_assert(di->symtab[i].size > 0);
1919 vg_assert(di->symtab[i+1].size > 0);
1920 /* It may be that the i+1 entry now needs to be moved further
1921 along to maintain the address order requirement. */
1922 j = i+1;
1923 while (j < ((Word)di->symtab_used)-1
1924 && di->symtab[j].avmas.main > di->symtab[j+1].avmas.main) {
1925 SWAP(DiSym,di->symtab[j],di->symtab[j+1]);
1926 j++;
1928 n_truncated++;
1930 /* END Detect and "fix" overlapping address ranges. */
/* Truncation may have created fresh identical ranges; redo the merge
   passes until a fixed point is reached. */
1932 if (n_truncated > 0) goto cleanup_more;
1934 /* Ensure relevant postconditions hold. */
1935 for (i = 0; i < ((Word)di->symtab_used)-1; i++) {
1936 /* No zero-sized symbols. */
1937 vg_assert(di->symtab[i].size > 0);
1938 /* In order. */
1939 vg_assert(di->symtab[i].avmas.main < di->symtab[i+1].avmas.main);
1940 /* No overlaps. */
1941 vg_assert(di->symtab[i].avmas.main + di->symtab[i].size - 1
1942 < di->symtab[i+1].avmas.main);
1943 /* Names are sane(ish) */
1944 vg_assert(di->symtab[i].pri_name);
1945 if (di->symtab[i].sec_names) {
1946 vg_assert(di->symtab[i].sec_names[0]);
1950 /* For each symbol that has more than one name, use preferName to
1951 select the primary name. This is a complete kludge in that
1952 doing it properly requires making a total ordering on the
1953 candidate names, whilst what we have to work with is an ad-hoc
1954 binary relation (preferName) that certainly doesn't have the
1955 relevant transitivity etc properties that are needed to induce a
1956 legitimate total order. Doesn't matter though if it doesn't
1957 always work right since this is only used to generate names to
1958 show the user. */
1959 for (i = 0; i < ((Word)di->symtab_used)-1; i++) {
1960 DiSym* sym = &di->symtab[i];
1961 const HChar** sec = sym->sec_names;
1962 if (!sec)
1963 continue;
1964 /* Slow but simple. Copy all the cands into a temp array,
1965 choose the primary name, and copy them all back again. */
1966 Word n_tmp = 1;
1967 while (*sec) { n_tmp++; sec++; }
1968 j = 0;
1969 const HChar** tmp = ML_(dinfo_zalloc)( "di.storage.cS.1",
1970 (n_tmp+1) * sizeof(HChar*) );
1971 tmp[j++] = sym->pri_name;
1972 sec = sym->sec_names;
1973 while (*sec) { tmp[j++] = *sec; sec++; }
1974 vg_assert(j == n_tmp);
1975 vg_assert(tmp[n_tmp] == NULL); /* because of zalloc */
1976 /* Choose the most favoured. */
1977 Word best = 0;
1978 for (j = 1; j < n_tmp; j++) {
1979 if (preferName(di, tmp[best], tmp[j], di->symtab[i].avmas.main)) {
1980 /* best is unchanged */
1981 } else {
1982 best = j;
1985 vg_assert(best >= 0 && best < n_tmp);
1986 /* Copy back */
1987 sym->pri_name = tmp[best];
1988 const HChar** cursor = sym->sec_names;
1989 for (j = 0; j < n_tmp; j++) {
1990 if (j == best)
1991 continue;
1992 *cursor = tmp[j];
1993 cursor++;
1995 vg_assert(*cursor == NULL);
1996 ML_(dinfo_free)( tmp );
1999 # undef SWAP
/* File-scope pointer used to pass the loctab being sorted into the
   comparator below, since VG_(ssort) comparators take no context
   argument.  Set/cleared around the sort in
   sort_loctab_and_loctab_fndn_ix. */
2003 static DiLoc* sorting_loctab = NULL;
/* Comparator for an index-array sort: va/vb are indices into
   sorting_loctab; order by the addressed entries' .addr fields. */
2004 static Int compare_DiLoc_via_ix ( const void* va, const void* vb )
2006 const DiLoc* a = &sorting_loctab[*(const UInt*)va];
2007 const DiLoc* b = &sorting_loctab[*(const UInt*)vb];
2008 if (a->addr < b->addr) return -1;
2009 if (a->addr > b->addr) return 1;
2010 return 0;
2012 static void sort_loctab_and_loctab_fndn_ix (struct _DebugInfo* di )
2014 /* We have to sort the array loctab by addr
2015 together with its "parallel" array loctab_fndn_ix.
2016 We first build sort_ix : an array of indexes in loctab,
2017 that we sort by loctab address. Then we can reorder both
2018 arrays according to sort_ix. */
2019 UInt *sort_ix = ML_(dinfo_zalloc)("di.storage.six",
2020 di->loctab_used*sizeof(UInt));
2021 Word i, j, k;
2023 for (i = 0; i < di->loctab_used; i++) sort_ix[i] = i;
2024 sorting_loctab = di->loctab;
2025 VG_(ssort)(sort_ix, di->loctab_used,
2026 sizeof(*sort_ix), compare_DiLoc_via_ix);
2027 sorting_loctab = NULL;
2029 // Permute in place, using the sort_ix.
/* Classic in-place permutation by following each cycle of sort_ix:
   save the entry at the cycle's start, shift the rest of the cycle
   one step, then drop the saved entry into the final hole.  Visited
   slots are marked done by setting sort_ix[j] = j. */
2030 for (i=0; i < di->loctab_used; i++) {
2031 DiLoc tmp_diloc;
2032 UInt tmp_fndn_ix;
2034 if (i == sort_ix[i])
2035 continue; // i already at the good place
2037 tmp_diloc = di->loctab[i];
2038 tmp_fndn_ix = ML_(fndn_ix)(di, i);
2039 j = i;
2040 for (;;) {
2041 k = sort_ix[j];
2042 sort_ix[j] = j;
2043 if (k == i)
2044 break;
2045 di->loctab[j] = di->loctab[k];
2046 set_fndn_ix (di, j, ML_(fndn_ix)(di, k));
2047 j = k;
2049 di->loctab[j] = tmp_diloc;
2050 set_fndn_ix (di, j, tmp_fndn_ix);
2052 ML_(dinfo_free)(sort_ix);
2055 /* Sort the location table by starting address. Mash the table around
2056 so as to establish the property that addresses are in order and the
2057 ranges do not overlap. This facilitates using binary search to map
2058 addresses to locations when we come to query the table.
2060 static void canonicaliseLoctab ( struct _DebugInfo* di )
2062 Word i, j;
2064 if (di->loctab_used == 0)
2065 return;
2067 /* sort loctab and loctab_fndn_ix by addr. */
2068 sort_loctab_and_loctab_fndn_ix (di);
2070 for (i = 0; i < ((Word)di->loctab_used)-1; i++) {
2071 vg_assert(di->loctab[i].size < 10000);
2072 /* If two adjacent entries overlap, truncate the first. */
2073 if (di->loctab[i].addr + di->loctab[i].size > di->loctab[i+1].addr) {
2074 /* Do this in signed int32 because the actual .size fields
2075 are only 12 bits. */
2076 Int new_size = di->loctab[i+1].addr - di->loctab[i].addr;
2077 TRACE_LOCTAB_CANON ("Truncating",
2078 &(di->loctab[i]), &(di->loctab[i+1]));
2079 if (new_size < 0) {
2080 di->loctab[i].size = 0;
2081 } else
2082 if (new_size > MAX_LOC_SIZE) {
2083 di->loctab[i].size = MAX_LOC_SIZE;
2084 } else {
2085 di->loctab[i].size = (UShort)new_size;
2090 /* Zap any zero-sized entries resulting from the truncation
2091 process. */
/* Compacts in place: j is the write cursor, i the read cursor; the
   parallel fndn_ix array is kept in step. */
2092 j = 0;
2093 for (i = 0; i < (Word)di->loctab_used; i++) {
2094 if (di->loctab[i].size > 0) {
2095 if (j != i) {
2096 di->loctab[j] = di->loctab[i];
2097 set_fndn_ix(di, j, ML_(fndn_ix)(di, i));
2099 j++;
2102 di->loctab_used = j;
2104 /* Ensure relevant postconditions hold. */
2105 for (i = 0; i < ((Word)di->loctab_used)-1; i++) {
2106 if (0)
2107 VG_(printf)("%ld 0x%p lno:%d sz:%d fndn_ix:%u i+1 0x%p\n",
2109 (void*)di->loctab[i].addr,
2110 di->loctab[i].lineno,
2111 di->loctab[i].size,
2112 ML_(fndn_ix)(di, i),
2113 (void*)di->loctab[i+1].addr);
2115 /* No zero-sized symbols. */
2116 vg_assert(di->loctab[i].size > 0);
2117 /* In order. */
2118 vg_assert(di->loctab[i].addr < di->loctab[i+1].addr);
2119 /* No overlaps. */
2120 vg_assert(di->loctab[i].addr + di->loctab[i].size - 1
2121 < di->loctab[i+1].addr);
2124 /* Free up unused space at the end of the table. */
2125 shrinkLocTab(di);
2128 /* Sort the inlined call table by starting address. Mash the table around
2129 so as to establish the property that addresses are in order.
2130 This facilitates using binary search to map addresses to locations when
2131 we come to query the table.
2132 Note : ranges can overlap, multiple ranges can start at an address,
2133 multiple ranges can end at an address.
2135 static Int compare_DiInlLoc ( const void* va, const void* vb )
2137 const DiInlLoc* a = va;
2138 const DiInlLoc* b = vb;
2139 if (a->addr_lo < b->addr_lo) return -1;
2140 if (a->addr_lo > b->addr_lo) return 1;
2141 return 0;
2144 static void canonicaliseInltab ( struct _DebugInfo* di )
2146 Word i;
2148 if (di->inltab_used == 0)
2149 return;
2151 /* Sort by start address. */
2152 VG_(ssort)(di->inltab, di->inltab_used,
2153 sizeof(*di->inltab), compare_DiInlLoc);
2155 /* Ensure relevant postconditions hold. */
2156 for (i = 0; i < ((Word)di->inltab_used)-1; i++) {
2157 /* No zero-sized inlined call. */
2158 vg_assert(di->inltab[i].addr_lo < di->inltab[i].addr_hi);
2159 /* In order, but we can have duplicates and overlapping ranges. */
2160 vg_assert(di->inltab[i].addr_lo <= di->inltab[i+1].addr_lo);
2163 /* Free up unused space at the end of the table. */
2164 shrinkInlTab(di);
2168 /* Sort the call-frame-info cfsi_rd by starting address. Mash the table
2169 around so as to establish the property that addresses are in order
2170 and the ranges do not overlap. This facilitates using binary
2171 search to map addresses to locations when we come to query the
2172 table.
2174 Also, set cfisi_minaddr and cfisi_maxaddr to be the min and max of
2175 any of the address ranges contained in cfisi[0 .. cfisi_used-1], so
2176 as to facilitate rapidly skipping this SegInfo when looking for an
2177 address which falls outside that range.
2179 static Int compare_DiCfSI ( const void* va, const void* vb )
2181 const DiCfSI* a = va;
2182 const DiCfSI* b = vb;
2183 if (a->base < b->base) return -1;
2184 if (a->base > b->base) return 1;
2185 return 0;
2188 static void get_cfsi_rd_stats ( const DebugInfo* di,
2189 UWord *n_mergeables, UWord *n_holes )
2191 Word i;
2193 *n_mergeables = 0;
2194 *n_holes = 0;
2196 vg_assert (di->cfsi_used == 0 || di->cfsi_rd);
2197 for (i = 1; i < (Word)di->cfsi_used; i++) {
2198 Addr here_min = di->cfsi_rd[i].base;
2199 Addr prev_max = di->cfsi_rd[i-1].base + di->cfsi_rd[i-1].len - 1;
2200 Addr sep = here_min - prev_max;
2201 if (sep > 1)
2202 (*n_holes)++;
2203 if (sep == 1 && di->cfsi_rd[i-1].cfsi_m_ix == di->cfsi_rd[i].cfsi_m_ix)
2204 (*n_mergeables)++;
/* Canonicalise the cfsi_rd table: compute the summary address range
   (cfsi_minavma/cfsi_maxavma), sort by base address, truncate
   overlapping entries, drop zero-length entries, and assert the
   resulting ordering/non-overlap postconditions. */
void ML_(canonicaliseCFI) ( struct _DebugInfo* di )
{
   Word  i, j;
   const Addr minAvma = 0;
   const Addr maxAvma = ~minAvma;

   /* Note: take care in here.  di->cfsi can be NULL, in which
      case _used and _size fields will be zero. */
   if (di->cfsi_rd == NULL) {
      vg_assert(di->cfsi_used == 0);
      vg_assert(di->cfsi_size == 0);
      vg_assert(di->cfsi_m_pool == NULL);
   } else {
      vg_assert(di->cfsi_size != 0);
      vg_assert(di->cfsi_m_pool != NULL);
   }

   /* Set cfsi_minavma and cfsi_maxavma to summarise the entire
      address range contained in cfsi_rd[0 .. cfsi_used-1]. */
   di->cfsi_minavma = maxAvma;
   di->cfsi_maxavma = minAvma;
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      Addr here_min = di->cfsi_rd[i].base;
      Addr here_max = di->cfsi_rd[i].base + di->cfsi_rd[i].len - 1;
      if (here_min < di->cfsi_minavma)
         di->cfsi_minavma = here_min;
      if (here_max > di->cfsi_maxavma)
         di->cfsi_maxavma = here_max;
   }

   if (di->trace_cfi)
      VG_(printf)("canonicaliseCfiSI: %lu entries, %#lx .. %#lx\n",
                  di->cfsi_used,
                  di->cfsi_minavma, di->cfsi_maxavma);

   /* Sort the cfsi_rd array by base address. */
   VG_(ssort)(di->cfsi_rd, di->cfsi_used, sizeof(*di->cfsi_rd), compare_DiCfSI);

   /* If two adjacent entries overlap, truncate the first. */
   for (i = 0; i < (Word)di->cfsi_used-1; i++) {
      if (di->cfsi_rd[i].base + di->cfsi_rd[i].len > di->cfsi_rd[i+1].base) {
         Word new_len = di->cfsi_rd[i+1].base - di->cfsi_rd[i].base;
         /* how could it be otherwise?  The entries are sorted by the
            .base field. */
         vg_assert(new_len >= 0);
         vg_assert(new_len <= di->cfsi_rd[i].len);
         di->cfsi_rd[i].len = new_len;
      }
   }

   /* Zap any zero-sized entries resulting from the truncation
      process. */
   j = 0;
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      if (di->cfsi_rd[i].len > 0) {
         if (j != i)
            di->cfsi_rd[j] = di->cfsi_rd[i];
         j++;
      }
   }
   /* VG_(printf)("XXXXXXXXXXXXX %d %d\n", di->cfsi_used, j); */
   di->cfsi_used = j;

   /* Ensure relevant postconditions hold. */
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      /* No zero-length ranges. */
      vg_assert(di->cfsi_rd[i].len > 0);
      /* Makes sense w.r.t. summary address range */
      vg_assert(di->cfsi_rd[i].base >= di->cfsi_minavma);
      vg_assert(di->cfsi_rd[i].base + di->cfsi_rd[i].len - 1
                <= di->cfsi_maxavma);

      if (i < di->cfsi_used - 1) {
         /* Disabled debug dump, kept for reference:
         if (!(di->cfsi[i].base < di->cfsi[i+1].base)) {
            VG_(printf)("\nOOO cfsis:\n");
            ML_(ppCfiSI)(&di->cfsi[i]);
            ML_(ppCfiSI)(&di->cfsi[i+1]);
         }
         */
         /* In order. */
         vg_assert(di->cfsi_rd[i].base < di->cfsi_rd[i+1].base);
         /* No overlaps. */
         vg_assert(di->cfsi_rd[i].base + di->cfsi_rd[i].len - 1
                   < di->cfsi_rd[i+1].base);
      }
   }

   /* Optionally report merge/hole statistics for the canonicalised
      table. */
   if (VG_(clo_stats) && VG_(clo_verbosity) >= 3) {
      UWord n_mergeables, n_holes;
      get_cfsi_rd_stats (di, &n_mergeables, &n_holes);
      VG_(dmsg)("CFSI total %lu mergeables %lu holes %lu uniq cfsi_m %u\n",
                di->cfsi_used, n_mergeables, n_holes,
                di->cfsi_m_pool ? VG_(sizeDedupPA) (di->cfsi_m_pool) : 0);
   }
}
/* Convert the (already canonicalised) cfsi_rd array into the compact
   final representation: a cfsi_base[] array of start addresses plus a
   packed cfsi_m_ix[] array of indices into the cfsi_m dedup pool.
   Entries exactly adjacent to their predecessor with the same
   cfsi_m_ix are merged away; gaps between entries get an explicit
   "hole" entry with index 0.  cfsi_rd is freed afterwards. */
void ML_(finish_CFSI_arrays) ( struct _DebugInfo* di )
{
   UWord n_mergeables, n_holes;
   UWord new_used;
   UWord i;
   UWord pos;
   UWord f_mergeables, f_holes;
   UInt sz_cfsi_m_pool;

   get_cfsi_rd_stats (di, &n_mergeables, &n_holes);

   if (di->cfsi_used == 0) {
      vg_assert (di->cfsi_rd == NULL);
      vg_assert (di->cfsi_m_pool == NULL);
      vg_assert (n_mergeables == 0);
      vg_assert (n_holes == 0);
      return;
   }

   /* Final entry count: merged entries disappear, holes appear. */
   vg_assert (di->cfsi_used > n_mergeables);
   new_used = di->cfsi_used - n_mergeables + n_holes;

   /* Pick the narrowest element width (1, 2 or 4 bytes) that can
      hold every index into the cfsi_m pool. */
   sz_cfsi_m_pool = VG_(sizeDedupPA)(di->cfsi_m_pool);
   vg_assert (sz_cfsi_m_pool > 0);
   if (sz_cfsi_m_pool <= 255)
      di->sizeof_cfsi_m_ix = 1;
   else if (sz_cfsi_m_pool <= 65535)
      di->sizeof_cfsi_m_ix = 2;
   else
      di->sizeof_cfsi_m_ix = 4;

   di->cfsi_base = ML_(dinfo_zalloc)( "di.storage.finCfSI.1",
                                       new_used * sizeof(Addr) );
   di->cfsi_m_ix = ML_(dinfo_zalloc)( "di.storage.finCfSI.2",
                                      new_used * sizeof(UChar)*di->sizeof_cfsi_m_ix);

   /* Walk cfsi_rd, emitting into cfsi_base/cfsi_m_ix at 'pos'.
      f_mergeables/f_holes recount what get_cfsi_rd_stats predicted,
      and are cross-checked below. */
   pos = 0;
   f_mergeables = 0;
   f_holes = 0;
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      if (i > 0) {
         Addr here_min = di->cfsi_rd[i].base;
         Addr prev_max = di->cfsi_rd[i-1].base + di->cfsi_rd[i-1].len - 1;
         SizeT sep = here_min - prev_max;

         // Skip a mergeable entry.
         if (sep == 1) {
            if (di->cfsi_rd[i-1].cfsi_m_ix == di->cfsi_rd[i].cfsi_m_ix) {
               f_mergeables++;
               continue;
            }
         }
         // Insert a hole if needed.
         if (sep > 1) {
            f_holes++;
            di->cfsi_base[pos] = prev_max + 1;
            /* Index 0 marks a hole (no unwind info). */
            switch (di->sizeof_cfsi_m_ix) {
               case 1: ((UChar*) di->cfsi_m_ix)[pos] = 0; break;
               case 2: ((UShort*)di->cfsi_m_ix)[pos] = 0; break;
               case 4: ((UInt*)  di->cfsi_m_ix)[pos] = 0; break;
               default: vg_assert(0);
            }
            pos++;
         }
      }

      // Insert the cfsi entry i.
      di->cfsi_base[pos] = di->cfsi_rd[i].base;
      switch (di->sizeof_cfsi_m_ix) {
         case 1: ((UChar*) di->cfsi_m_ix)[pos] = di->cfsi_rd[i].cfsi_m_ix; break;
         case 2: ((UShort*)di->cfsi_m_ix)[pos] = di->cfsi_rd[i].cfsi_m_ix; break;
         case 4: ((UInt*)  di->cfsi_m_ix)[pos] = di->cfsi_rd[i].cfsi_m_ix; break;
         default: vg_assert(0);
      }
      pos++;
   }

   /* The prediction and the emission must agree exactly. */
   vg_assert (f_mergeables == n_mergeables);
   vg_assert (f_holes == n_holes);
   vg_assert (pos == new_used);

   /* The compact arrays replace cfsi_rd from here on. */
   di->cfsi_used = new_used;
   di->cfsi_size = new_used;
   ML_(dinfo_free) (di->cfsi_rd);
   di->cfsi_rd = NULL;
}
/* Canonicalise the tables held by 'di', in preparation for use.  Call
   this after finishing adding entries to these tables. */
void ML_(canonicaliseTables) ( struct _DebugInfo* di )
{
   /* Canonicalise each table in turn: symbols, source locations,
      inlined-call info, then call-frame info. */
   canonicaliseSymtab ( di );
   canonicaliseLoctab ( di );
   canonicaliseInltab ( di );
   ML_(canonicaliseCFI) ( di );
   /* Freeze the dedup pools once no further additions will happen;
      ML_(dinfo_shrink_block) releases their unused space. */
   if (di->cfsi_m_pool)
      VG_(freezeDedupPA) (di->cfsi_m_pool, ML_(dinfo_shrink_block));
   canonicaliseVarInfo ( di );
   if (di->strpool)
      VG_(freezeDedupPA) (di->strpool, ML_(dinfo_shrink_block));
   if (di->fndnpool)
      VG_(freezeDedupPA) (di->fndnpool, ML_(dinfo_shrink_block));
}
2411 /*------------------------------------------------------------*/
2412 /*--- Searching the tables ---*/
2413 /*------------------------------------------------------------*/
2415 /* Find a symbol-table index containing the specified pointer, or -1
2416 if not found. Binary search. */
2418 Word ML_(search_one_symtab) ( const DebugInfo* di, Addr ptr,
2419 Bool findText )
2421 Addr a_mid_lo, a_mid_hi;
2422 Word mid,
2423 lo = 0,
2424 hi = di->symtab_used-1;
2425 while (True) {
2426 /* current unsearched space is from lo to hi, inclusive. */
2427 if (lo > hi) return -1; /* not found */
2428 mid = (lo + hi) / 2;
2429 a_mid_lo = di->symtab[mid].avmas.main;
2430 a_mid_hi = ((Addr)di->symtab[mid].avmas.main) + di->symtab[mid].size - 1;
2432 if (ptr < a_mid_lo) { hi = mid-1; continue; }
2433 if (ptr > a_mid_hi) { lo = mid+1; continue; }
2434 vg_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
2435 /* Found a symbol with the correct address range. But is it
2436 of the right kind (text vs data) ? */
2437 if ( findText && di->symtab[mid].isText ) return mid;
2438 if ( (!findText) && (!di->symtab[mid].isText) ) return mid;
2439 return -1;
2444 /* Find a location-table index containing the specified pointer, or -1
2445 if not found. Binary search. */
2447 Word ML_(search_one_loctab) ( const DebugInfo* di, Addr ptr )
2449 Addr a_mid_lo, a_mid_hi;
2450 Word mid,
2451 lo = 0,
2452 hi = di->loctab_used-1;
2453 while (True) {
2454 /* current unsearched space is from lo to hi, inclusive. */
2455 if (lo > hi) return -1; /* not found */
2456 mid = (lo + hi) / 2;
2457 a_mid_lo = di->loctab[mid].addr;
2458 a_mid_hi = ((Addr)di->loctab[mid].addr) + di->loctab[mid].size - 1;
2460 if (ptr < a_mid_lo) { hi = mid-1; continue; }
2461 if (ptr > a_mid_hi) { lo = mid+1; continue; }
2462 vg_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
2463 return mid;
/* Find a CFI-table index containing the specified pointer, or -1
   if not found.  Binary search.
   Returns the index of the last cfsi_base[] entry whose start address
   is <= ptr (cfsi_base[] entries cover up to the next entry's start),
   or -1 when ptr precedes cfsi_base[0]. */

Word ML_(search_one_cfitab) ( const DebugInfo* di, Addr ptr )
{
   Word mid,
        lo = 0,
        hi = di->cfsi_used-1;

   while (lo <= hi) {
      /* Invariants : hi == cfsi_used-1 || ptr < cfsi_base[hi+1]
                      lo == 0           || ptr > cfsi_base[lo-1]
         (the first part of the invariants is similar to considering
         that cfsi_base[-1] is 0 and cfsi_base[cfsi_used] is ~0) */
      mid      = (lo + hi) / 2;
      if (ptr < di->cfsi_base[mid]) { hi = mid-1; continue; }
      if (ptr > di->cfsi_base[mid]) { lo = mid+1; continue; }
      /* Exact hit on cfsi_base[mid]: arrange for lo-1 == mid. */
      lo = mid+1; break;
   }

   /* Slow linear cross-check of the search result, normally disabled. */
#if 0
   for (mid = 0; mid <= di->cfsi_used-1; mid++)
      if (ptr < di->cfsi_base[mid])
         break;
   vg_assert (lo - 1 == mid - 1);
#endif
   /* By the invariants, cfsi_base[lo-1] <= ptr < cfsi_base[lo]. */
   return lo - 1;
}
2498 /* Find a FPO-table index containing the specified pointer, or -1
2499 if not found. Binary search. */
2501 Word ML_(search_one_fpotab) ( const DebugInfo* di, Addr ptr )
2503 Addr const addr = ptr - di->fpo_base_avma;
2504 Addr a_mid_lo, a_mid_hi;
2505 Word mid, size,
2506 lo = 0,
2507 hi = di->fpo_size-1;
2508 while (True) {
2509 /* current unsearched space is from lo to hi, inclusive. */
2510 if (lo > hi) return -1; /* not found */
2511 mid = (lo + hi) / 2;
2512 a_mid_lo = di->fpo[mid].ulOffStart;
2513 size = di->fpo[mid].cbProcSize;
2514 a_mid_hi = a_mid_lo + size - 1;
2515 vg_assert(a_mid_hi >= a_mid_lo);
2516 if (addr < a_mid_lo) { hi = mid-1; continue; }
2517 if (addr > a_mid_hi) { lo = mid+1; continue; }
2518 vg_assert(addr >= a_mid_lo && addr <= a_mid_hi);
2519 return mid;
2523 /*--------------------------------------------------------------------*/
2524 /*--- end ---*/
2525 /*--------------------------------------------------------------------*/