/* Extracted from valgrind.git (coregrind/m_debuginfo/storage.c),
   blob 148de6f17e711a9e56fd1955181be4844e4e4426,
   commit "s390x: Re-implement STFLE as an extension". */
/*--------------------------------------------------------------------*/
/*--- Format-neutral storage of and querying of info acquired from ---*/
/*--- ELF/XCOFF stabs/dwarf1/dwarf2/dwarf3 debug info.             ---*/
/*---                                                    storage.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
/* This file manages the data structures built by the debuginfo
   system.  These are: the top level SegInfo list.  For each SegInfo,
   there are tables for address-to-symbol mappings,
   address-to-src-file/line mappings, and address-to-CFI-info
   mappings.
*/
#include "pub_core_basics.h"
#include "pub_core_options.h"      /* VG_(clo_verbosity) */
#include "pub_core_debuginfo.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcprint.h"
#include "pub_core_xarray.h"
#include "pub_core_oset.h"
#include "pub_core_deduppoolalloc.h"

#include "priv_misc.h"         /* dinfo_zalloc/free/strdup */
#include "priv_image.h"
#include "priv_d3basics.h"     /* ML_(pp_GX) */
#include "priv_tytypes.h"
#include "priv_storage.h"      /* self */
/*------------------------------------------------------------*/
/*--- Misc (printing, errors)                              ---*/
/*------------------------------------------------------------*/
60 /* Show a non-fatal debug info reading error. Use VG_(core_panic) for
61 fatal errors. 'serious' errors are shown regardless of the
62 verbosity setting. */
63 void ML_(symerr) ( const DebugInfo* di, Bool serious, const HChar* msg )
65 /* XML mode hides everything :-( */
66 if (VG_(clo_xml))
67 return;
69 if (serious) {
71 VG_(message)(Vg_DebugMsg, "WARNING: Serious error when "
72 "reading debug info\n");
73 if (True || VG_(clo_verbosity) < 2) {
74 /* Need to show what the file name is, at verbosity levels 2
75 or below, since that won't already have been shown */
76 VG_(message)(Vg_DebugMsg,
77 "When reading debug info from %s:\n",
78 (di && di->fsm.filename) ? di->fsm.filename
79 : "???");
81 VG_(message)(Vg_DebugMsg, "%s\n", msg);
83 } else { /* !serious */
85 if (VG_(clo_verbosity) >= 2)
86 VG_(message)(Vg_DebugMsg, "%s\n", msg);
92 /* Print a symbol. */
93 void ML_(ppSym) ( Int idx, const DiSym* sym )
95 const HChar** sec_names = sym->sec_names;
96 vg_assert(sym->pri_name);
97 if (sec_names)
98 vg_assert(sec_names);
99 VG_(printf)( "%5d: %c%c%c %#8lx .. %#8lx (%u) %s%s",
100 idx,
101 sym->isText ? 'T' : '-',
102 sym->isIFunc ? 'I' : '-',
103 sym->isGlobal ? 'G' : '-',
104 sym->avmas.main,
105 sym->avmas.main + sym->size - 1, sym->size,
106 sym->pri_name, sec_names ? " " : "" );
107 if (sec_names) {
108 while (*sec_names) {
109 VG_(printf)("%s%s", *sec_names, *(sec_names+1) ? " " : "");
110 sec_names++;
113 VG_(printf)("\n");
116 /* Print a call-frame-info summary. */
117 void ML_(ppDiCfSI) ( const XArray* /* of CfiExpr */ exprs,
118 Addr base, UInt len,
119 const DiCfSI_m* si_m )
121 # define SHOW_HOW(_how, _off) \
122 do { \
123 if (_how == CFIR_UNKNOWN) { \
124 VG_(printf)("Unknown"); \
125 } else \
126 if (_how == CFIR_SAME) { \
127 VG_(printf)("Same"); \
128 } else \
129 if (_how == CFIR_CFAREL) { \
130 VG_(printf)("cfa+%d", _off); \
131 } else \
132 if (_how == CFIR_MEMCFAREL) { \
133 VG_(printf)("*(cfa+%d)", _off); \
134 } else \
135 if (_how == CFIR_EXPR) { \
136 VG_(printf)("{"); \
137 ML_(ppCfiExpr)(exprs, _off); \
138 VG_(printf)("}"); \
139 } else \
140 if (_how == CFIR_S390X_F0) { \
141 VG_(printf)("oldF0"); \
142 } else \
143 if (_how == CFIR_S390X_F1) { \
144 VG_(printf)("oldF1"); \
145 } else \
146 if (_how == CFIR_S390X_F2) { \
147 VG_(printf)("oldF2"); \
148 } else \
149 if (_how == CFIR_S390X_F3) { \
150 VG_(printf)("oldF3"); \
151 } else \
152 if (_how == CFIR_S390X_F4) { \
153 VG_(printf)("oldF4"); \
154 } else \
155 if (_how == CFIR_S390X_F5) { \
156 VG_(printf)("oldF5"); \
157 } else \
158 if (_how == CFIR_S390X_F6) { \
159 VG_(printf)("oldF6"); \
160 } else \
161 if (_how == CFIR_S390X_F7) { \
162 VG_(printf)("oldF7"); \
163 } else { \
164 vg_assert(0+0); \
166 } while (0)
168 if (base != 0 || len != 0)
169 VG_(printf)("[%#lx .. %#lx]: ", base, base + len - 1);
170 else
171 VG_(printf)("[]: ");
173 switch (si_m->cfa_how) {
174 case CFIC_IA_SPREL:
175 VG_(printf)("let cfa=oldSP+%d", si_m->cfa_off);
176 break;
177 case CFIC_IA_BPREL:
178 VG_(printf)("let cfa=oldBP+%d", si_m->cfa_off);
179 break;
180 case CFIC_ARM_R13REL:
181 VG_(printf)("let cfa=oldR13+%d", si_m->cfa_off);
182 break;
183 case CFIC_ARM_R12REL:
184 VG_(printf)("let cfa=oldR12+%d", si_m->cfa_off);
185 break;
186 case CFIC_ARM_R11REL:
187 VG_(printf)("let cfa=oldR11+%d", si_m->cfa_off);
188 break;
189 case CFIR_SAME:
190 VG_(printf)("let cfa=Same");
191 break;
192 case CFIC_ARM_R7REL:
193 VG_(printf)("let cfa=oldR7+%d", si_m->cfa_off);
194 break;
195 case CFIC_ARM64_SPREL:
196 VG_(printf)("let cfa=oldSP+%d", si_m->cfa_off);
197 break;
198 case CFIC_ARM64_X29REL:
199 VG_(printf)("let cfa=oldX29+%d", si_m->cfa_off);
200 break;
201 case CFIC_EXPR:
202 VG_(printf)("let cfa={");
203 ML_(ppCfiExpr)(exprs, si_m->cfa_off);
204 VG_(printf)("}");
205 break;
206 default:
207 vg_assert(0);
210 VG_(printf)(" in RA=");
211 SHOW_HOW(si_m->ra_how, si_m->ra_off);
212 # if defined(VGA_x86) || defined(VGA_amd64)
213 VG_(printf)(" SP=");
214 SHOW_HOW(si_m->sp_how, si_m->sp_off);
215 VG_(printf)(" BP=");
216 SHOW_HOW(si_m->bp_how, si_m->bp_off);
217 # elif defined(VGA_arm)
218 VG_(printf)(" R14=");
219 SHOW_HOW(si_m->r14_how, si_m->r14_off);
220 VG_(printf)(" R13=");
221 SHOW_HOW(si_m->r13_how, si_m->r13_off);
222 VG_(printf)(" R12=");
223 SHOW_HOW(si_m->r12_how, si_m->r12_off);
224 VG_(printf)(" R11=");
225 SHOW_HOW(si_m->r11_how, si_m->r11_off);
226 VG_(printf)(" R7=");
227 SHOW_HOW(si_m->r7_how, si_m->r7_off);
228 # elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
229 /* nothing */
230 # elif defined(VGA_s390x)
231 VG_(printf)(" SP=");
232 SHOW_HOW(si_m->sp_how, si_m->sp_off);
233 VG_(printf)(" FP=");
234 SHOW_HOW(si_m->fp_how, si_m->fp_off);
235 VG_(printf)(" F0=");
236 SHOW_HOW(si_m->f0_how, si_m->f0_off);
237 VG_(printf)(" F1=");
238 SHOW_HOW(si_m->f1_how, si_m->f1_off);
239 VG_(printf)(" F2=");
240 SHOW_HOW(si_m->f2_how, si_m->f2_off);
241 VG_(printf)(" F3=");
242 SHOW_HOW(si_m->f3_how, si_m->f3_off);
243 VG_(printf)(" F4=");
244 SHOW_HOW(si_m->f4_how, si_m->f4_off);
245 VG_(printf)(" F5=");
246 SHOW_HOW(si_m->f5_how, si_m->f5_off);
247 VG_(printf)(" F6=");
248 SHOW_HOW(si_m->f6_how, si_m->f6_off);
249 VG_(printf)(" F7=");
250 SHOW_HOW(si_m->f7_how, si_m->f7_off);
251 # elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
252 VG_(printf)(" SP=");
253 SHOW_HOW(si_m->sp_how, si_m->sp_off);
254 VG_(printf)(" FP=");
255 SHOW_HOW(si_m->fp_how, si_m->fp_off);
256 # elif defined(VGA_arm64)
257 VG_(printf)(" SP=");
258 SHOW_HOW(si_m->sp_how, si_m->sp_off);
259 VG_(printf)(" X30=");
260 SHOW_HOW(si_m->x30_how, si_m->x30_off);
261 VG_(printf)(" X29=");
262 SHOW_HOW(si_m->x29_how, si_m->x29_off);
263 # else
264 # error "Unknown arch"
265 # endif
266 VG_(printf)("\n");
267 # undef SHOW_HOW
/*------------------------------------------------------------*/
/*--- Adding stuff                                         ---*/
/*------------------------------------------------------------*/
275 /* If not yet in strpool, add a str to the string pool including terminating
276 zero.
277 Return the pointer to the string in strpool.
279 const HChar* ML_(addStr) ( DebugInfo* di, const HChar* str, Int len )
281 if (len == -1) {
282 len = VG_(strlen)(str);
283 } else {
284 vg_assert(len >= 0);
286 if (UNLIKELY(di->strpool == NULL))
287 di->strpool = VG_(newDedupPA)(SEGINFO_STRPOOLSIZE,
289 ML_(dinfo_zalloc),
290 "di.storage.addStr.1",
291 ML_(dinfo_free));
292 return VG_(allocEltDedupPA) (di->strpool, len+1, str);
295 UInt ML_(addFnDn) (struct _DebugInfo* di,
296 const HChar* filename,
297 const HChar* dirname)
299 FnDn fndn;
300 UInt fndn_ix;
302 if (UNLIKELY(di->fndnpool == NULL))
303 di->fndnpool = VG_(newDedupPA)(500,
304 vg_alignof(FnDn),
305 ML_(dinfo_zalloc),
306 "di.storage.addFnDn.1",
307 ML_(dinfo_free));
308 fndn.filename = ML_(addStr)(di, filename, -1);
309 fndn.dirname = dirname ? ML_(addStr)(di, dirname, -1) : NULL;
310 fndn_ix = VG_(allocFixedEltDedupPA) (di->fndnpool, sizeof(FnDn), &fndn);
311 return fndn_ix;
314 const HChar* ML_(fndn_ix2filename) (const DebugInfo* di,
315 UInt fndn_ix)
317 FnDn *fndn;
318 if (fndn_ix == 0)
319 return "???";
320 else {
321 fndn = VG_(indexEltNumber) (di->fndnpool, fndn_ix);
322 return fndn->filename;
326 const HChar* ML_(fndn_ix2dirname) (const DebugInfo* di,
327 UInt fndn_ix)
329 FnDn *fndn;
330 if (fndn_ix == 0)
331 return "";
332 else {
333 fndn = VG_(indexEltNumber) (di->fndnpool, fndn_ix);
334 if (fndn->dirname)
335 return fndn->dirname;
336 else
337 return "";
341 /* Add a string to the string table of a DebugInfo, by copying the
342 string from the given DiCursor. Measures the length of the string
343 itself. */
344 const HChar* ML_(addStrFromCursor)( DebugInfo* di, DiCursor c )
346 /* This is a less-than-stellar implementation, but it should
347 work. */
348 vg_assert(ML_(cur_is_valid)(c));
349 HChar* str = ML_(cur_read_strdup)(c, "di.addStrFromCursor.1");
350 const HChar* res = ML_(addStr)(di, str, -1);
351 ML_(dinfo_free)(str);
352 return res;
356 /* Add a symbol to the symbol table, by copying *sym. 'sym' may only
357 have one name, so there's no complexities to do with deep vs
358 shallow copying of the sec_name array. This is checked.
360 void ML_(addSym) ( struct _DebugInfo* di, DiSym* sym )
362 UInt new_sz, i;
363 DiSym* new_tab;
365 vg_assert(sym->pri_name != NULL);
366 vg_assert(sym->sec_names == NULL);
368 /* Ignore zero-sized syms. */
369 if (sym->size == 0) return;
371 if (di->symtab_used == di->symtab_size) {
372 new_sz = 2 * di->symtab_size;
373 if (new_sz == 0) new_sz = 500;
374 new_tab = ML_(dinfo_zalloc)( "di.storage.addSym.1",
375 new_sz * sizeof(DiSym) );
376 if (di->symtab != NULL) {
377 for (i = 0; i < di->symtab_used; i++)
378 new_tab[i] = di->symtab[i];
379 ML_(dinfo_free)(di->symtab);
381 di->symtab = new_tab;
382 di->symtab_size = new_sz;
385 di->symtab[di->symtab_used++] = *sym;
386 vg_assert(di->symtab_used <= di->symtab_size);
389 UInt ML_(fndn_ix) (const DebugInfo* di, Word locno)
391 UInt fndn_ix;
393 switch(di->sizeof_fndn_ix) {
394 case 1: fndn_ix = ((UChar*) di->loctab_fndn_ix)[locno]; break;
395 case 2: fndn_ix = ((UShort*) di->loctab_fndn_ix)[locno]; break;
396 case 4: fndn_ix = ((UInt*) di->loctab_fndn_ix)[locno]; break;
397 default: vg_assert(0);
399 return fndn_ix;
402 static inline void set_fndn_ix (struct _DebugInfo* di, Word locno, UInt fndn_ix)
404 Word i;
406 switch(di->sizeof_fndn_ix) {
407 case 1:
408 if (LIKELY (fndn_ix <= 255)) {
409 ((UChar*) di->loctab_fndn_ix)[locno] = fndn_ix;
410 return;
413 UChar* old = (UChar*) di->loctab_fndn_ix;
414 UShort* new = ML_(dinfo_zalloc)( "di.storage.sfix.1",
415 di->loctab_size * 2 );
416 for (i = 0; i < di->loctab_used; i++)
417 new[i] = old[i];
418 ML_(dinfo_free)(old);
419 di->sizeof_fndn_ix = 2;
420 di->loctab_fndn_ix = new;
422 // Fallthrough
424 case 2:
425 if (LIKELY (fndn_ix <= 65535)) {
426 ((UShort*) di->loctab_fndn_ix)[locno] = fndn_ix;
427 return;
430 UShort* old = (UShort*) di->loctab_fndn_ix;
431 UInt* new = ML_(dinfo_zalloc)( "di.storage.sfix.2",
432 di->loctab_size * 4 );
433 for (i = 0; i < di->loctab_used; i++)
434 new[i] = old[i];
435 ML_(dinfo_free)(old);
436 di->sizeof_fndn_ix = 4;
437 di->loctab_fndn_ix = new;
439 // Fallthrough
441 case 4:
442 ((UInt*) di->loctab_fndn_ix)[locno] = fndn_ix;
443 return;
445 default: vg_assert(0);
// Comment the below line to trace LOCTAB merging/canonicalising
#define TRACE_LOCTAB_CANON(msg,prev_loc,cur_loc)
#ifndef TRACE_LOCTAB_CANON
#define TRACE_LOCTAB_CANON(msg,prev_loc,cur_loc)                        \
   VG_(printf)("%s previous: addr %#lx, size %d, line %d, "             \
               " current: addr %#lx, size %d, line %d.\n",              \
               msg,                                                     \
               (prev_loc)->addr, (prev_loc)->size, (prev_loc)->lineno,  \
               (cur_loc)->addr, (cur_loc)->size, (cur_loc)->lineno);
#endif
461 /* Add a location to the location table.
463 static void addLoc ( struct _DebugInfo* di, DiLoc* loc, UInt fndn_ix )
465 /* Zero-sized locs should have been ignored earlier */
466 vg_assert(loc->size > 0);
468 /* Check if the last entry has adjacent range for the same line. */
469 if (di->loctab_used > 0) {
470 DiLoc *previous = &di->loctab[di->loctab_used - 1];
471 if ((previous->lineno == loc->lineno)
472 && (previous->addr + previous->size == loc->addr)) {
473 if (previous->size + loc->size <= MAX_LOC_SIZE) {
474 TRACE_LOCTAB_CANON ("addLoc merging", previous, loc);
475 previous->size += loc->size;
476 return;
477 } else {
478 TRACE_LOCTAB_CANON ("addLoc merging not done (maxsize)",
479 previous, loc);
484 if (di->loctab_used == di->loctab_size) {
485 UInt new_sz;
486 DiLoc* new_loctab;
487 void* new_loctab_fndn_ix;
489 new_sz = 2 * di->loctab_size;
490 if (new_sz == 0) new_sz = 500;
491 new_loctab = ML_(dinfo_zalloc)( "di.storage.addLoc.1",
492 new_sz * sizeof(DiLoc) );
493 if (di->sizeof_fndn_ix == 0)
494 di->sizeof_fndn_ix = 1; // To start with.
495 new_loctab_fndn_ix = ML_(dinfo_zalloc)( "di.storage.addLoc.2",
496 new_sz * di->sizeof_fndn_ix );
497 if (di->loctab != NULL) {
498 VG_(memcpy)(new_loctab, di->loctab,
499 di->loctab_used * sizeof(DiLoc));
500 VG_(memcpy)(new_loctab_fndn_ix, di->loctab_fndn_ix,
501 di->loctab_used * di->sizeof_fndn_ix);
502 ML_(dinfo_free)(di->loctab);
503 ML_(dinfo_free)(di->loctab_fndn_ix);
505 di->loctab = new_loctab;
506 di->loctab_fndn_ix = new_loctab_fndn_ix;
507 di->loctab_size = new_sz;
510 di->loctab[di->loctab_used] = *loc;
511 set_fndn_ix (di, di->loctab_used, fndn_ix);
512 di->loctab_used++;
513 vg_assert(di->loctab_used <= di->loctab_size);
517 /* Resize the LocTab (line number table) to save memory, by removing
518 (and, potentially, allowing m_mallocfree to unmap) any unused space
519 at the end of the table. */
520 static void shrinkLocTab ( struct _DebugInfo* di )
522 UWord new_sz = di->loctab_used;
523 if (new_sz == di->loctab_size) return;
524 vg_assert(new_sz < di->loctab_size);
525 ML_(dinfo_shrink_block)( di->loctab, new_sz * sizeof(DiLoc));
526 ML_(dinfo_shrink_block)( di->loctab_fndn_ix, new_sz * di->sizeof_fndn_ix);
527 di->loctab_size = new_sz;
// Complain once, unless VG_(debugLog_getLevel)() > 3
// showinfo is called if VG_(debugLog_getLevel)() >= 1
// Expects 'lineno' to be in scope at the expansion site.
#define COMPLAIN_ONCE(what, limit, limit_op, showinfo)               \
   {                                                                 \
      static Bool complained = False;                                \
      if (!complained) {                                             \
         complained = VG_(debugLog_getLevel)() <= 3;                 \
         VG_(message)(Vg_UserMsg,                                    \
                      "warning: Can't handle " what " with "         \
                      "line number %d " limit_op " than %d\n",       \
                      lineno, limit);                                \
         if (VG_(debugLog_getLevel)() >= 1) showinfo;                \
         if (complained)                                             \
            VG_(message)(Vg_UserMsg,                                 \
                         "(Nb: this message is only shown once)\n"); \
      }                                                              \
   }
549 /* Top-level place to call to add a source-location mapping entry.
551 void ML_(addLineInfo) ( struct _DebugInfo* di,
552 UInt fndn_ix,
553 Addr this,
554 Addr next,
555 Int lineno,
556 Int entry /* only needed for debug printing */
559 static const Bool debug = False;
560 DiLoc loc;
561 UWord size = next - this;
563 # define SHOWLINEINFO \
564 VG_(message)(Vg_DebugMsg, \
565 "addLoc: addr %#lx, size %lu, line %d, fndn_ix %u\n", \
566 this,size,lineno,fndn_ix)
568 /* Ignore zero-sized locs */
569 if (this == next) return;
571 if (debug) {
572 FnDn *fndn = VG_(indexEltNumber) (di->fndnpool, fndn_ix);
573 VG_(printf)( " src ix %u %s %s line %d %#lx-%#lx\n",
574 fndn_ix,
575 fndn->dirname ? fndn->dirname : "(unknown)",
576 fndn->filename, lineno, this, next );
579 /* Maximum sanity checking. Some versions of GNU as do a shabby
580 * job with stabs entries; if anything looks suspicious, revert to
581 * a size of 1. This should catch the instruction of interest
582 * (since if using asm-level debug info, one instruction will
583 * correspond to one line, unlike with C-level debug info where
584 * multiple instructions can map to the one line), but avoid
585 * catching any other instructions bogusly. */
586 if (this > next) {
587 if (VG_(clo_verbosity) > 2) {
588 VG_(message)(Vg_DebugMsg,
589 "warning: line info addresses out of order "
590 "at entry %d: 0x%lx 0x%lx\n", entry, this, next);
592 size = 1;
595 if (size > MAX_LOC_SIZE) {
596 if (0)
597 VG_(message)(Vg_DebugMsg,
598 "warning: line info address range too large "
599 "at entry %d: %lu\n", entry, size);
600 size = 1;
603 /* At this point, we know that the original value for |size|, viz
604 |next - this|, will only still be used in the case where
605 |this| <u |next|, so it can't have underflowed. Considering
606 that and the three checks that follow it, the following must
607 hold. */
608 vg_assert(size >= 1);
609 vg_assert(size <= MAX_LOC_SIZE);
611 /* Rule out ones which are completely outside the r-x mapped area.
612 See "Comment_Regarding_Text_Range_Checks" elsewhere in this file
613 for background and rationale. */
614 vg_assert(di->fsm.have_rx_map);
615 if (ML_(find_rx_mapping)(di, this, this + size - 1) == NULL) {
616 if (0)
617 VG_(message)(Vg_DebugMsg,
618 "warning: ignoring line info entry falling "
619 "outside current DebugInfo: %#lx %#lx %#lx %#lx\n",
620 di->text_avma,
621 di->text_avma + di->text_size,
622 this, this + size - 1);
623 return;
626 if (lineno < 0) {
627 COMPLAIN_ONCE("line info entry", 0, "smaller", SHOWLINEINFO);
628 return;
630 if (lineno > MAX_LINENO) {
631 /* With --enable-lto, gcc 9 creates huge line numbers e.g. in the tool
632 => only complain with some debug level. */
633 if (VG_(debugLog_getLevel)() >= 1)
634 COMPLAIN_ONCE("line info entry", MAX_LINENO, "greater", SHOWLINEINFO);
635 return;
638 loc.addr = this;
639 loc.size = (UShort)size;
640 loc.lineno = lineno;
642 if (0) SHOWLINEINFO;
644 addLoc ( di, &loc, fndn_ix );
645 # undef SHOWLINEINFO
648 /* Add an inlined call info to the inlined call table.
650 static void addInl ( struct _DebugInfo* di, DiInlLoc* inl )
652 UInt new_sz, i;
653 DiInlLoc* new_tab;
655 /* empty inl should have been ignored earlier */
656 vg_assert(inl->addr_lo < inl->addr_hi);
658 if (di->inltab_used == di->inltab_size) {
659 new_sz = 2 * di->inltab_size;
660 if (new_sz == 0) new_sz = 500;
661 new_tab = ML_(dinfo_zalloc)( "di.storage.addInl.1",
662 new_sz * sizeof(DiInlLoc) );
663 if (di->inltab != NULL) {
664 for (i = 0; i < di->inltab_used; i++)
665 new_tab[i] = di->inltab[i];
666 ML_(dinfo_free)(di->inltab);
668 di->inltab = new_tab;
669 di->inltab_size = new_sz;
672 di->inltab[di->inltab_used] = *inl;
673 if (inl->addr_hi - inl->addr_lo > di->maxinl_codesz)
674 di->maxinl_codesz = inl->addr_hi - inl->addr_lo;
675 di->inltab_used++;
676 vg_assert(di->inltab_used <= di->inltab_size);
680 /* Resize the InlTab (inlined call table) to save memory, by removing
681 (and, potentially, allowing m_mallocfree to unmap) any unused space
682 at the end of the table. */
683 static void shrinkInlTab ( struct _DebugInfo* di )
685 UWord new_sz = di->inltab_used;
686 if (new_sz == di->inltab_size) return;
687 vg_assert(new_sz < di->inltab_size);
688 ML_(dinfo_shrink_block)( di->inltab, new_sz * sizeof(DiInlLoc));
689 di->inltab_size = new_sz;
692 /* Top-level place to call to add a addr-to-inlined fn info. */
693 void ML_(addInlInfo) ( struct _DebugInfo* di,
694 Addr addr_lo, Addr addr_hi,
695 const HChar* inlinedfn,
696 UInt fndn_ix,
697 Int lineno, UShort level)
699 DiInlLoc inl;
701 # define SHOWLINEINFO \
702 VG_(message) (Vg_DebugMsg, \
703 "addInlInfo: fn %s inlined as addr_lo %#lx,addr_hi %#lx," \
704 "caller fndn_ix %u %s:%d\n", \
705 inlinedfn, addr_lo, addr_hi, fndn_ix, \
706 ML_(fndn_ix2filename) (di, fndn_ix), lineno)
708 /* Similar paranoia as in ML_(addLineInfo). Unclear if needed. */
709 if (addr_lo >= addr_hi) {
710 if (VG_(clo_verbosity) > 2) {
711 VG_(message)(Vg_DebugMsg,
712 "warning: inlined info addresses out of order "
713 "at: 0x%lx 0x%lx\n", addr_lo, addr_hi);
714 SHOWLINEINFO;
716 addr_hi = addr_lo + 1;
719 if (lineno < 0) {
720 COMPLAIN_ONCE ("inlined call info entry", 0, "smaller", SHOWLINEINFO);
721 return;
723 if (lineno > MAX_LINENO) {
724 /* With --enable-lto, gcc 9 creates huge line numbers e.g. in the tool
725 => only complain with some debug level. */
726 if (VG_(debugLog_getLevel)() >= 1)
727 COMPLAIN_ONCE ("inlined call info entry", MAX_LINENO, "greater",
728 SHOWLINEINFO);
729 return;
732 // code resulting from inlining of inlinedfn:
733 inl.addr_lo = addr_lo;
734 inl.addr_hi = addr_hi;
735 inl.inlinedfn = inlinedfn;
736 // caller:
737 inl.fndn_ix = fndn_ix;
738 inl.lineno = lineno;
739 inl.level = level;
741 if (0) SHOWLINEINFO;
743 addInl ( di, &inl );
744 # undef SHOWLINEINFO
747 DiCfSI_m* ML_(get_cfsi_m) (const DebugInfo* di, UInt pos)
749 UInt cfsi_m_ix;
751 vg_assert(pos < di->cfsi_used);
752 switch (di->sizeof_cfsi_m_ix) {
753 case 1: cfsi_m_ix = ((UChar*) di->cfsi_m_ix)[pos]; break;
754 case 2: cfsi_m_ix = ((UShort*) di->cfsi_m_ix)[pos]; break;
755 case 4: cfsi_m_ix = ((UInt*) di->cfsi_m_ix)[pos]; break;
756 default: vg_assert(0);
758 if (cfsi_m_ix == 0)
759 return NULL; // cfi hole
760 else
761 return VG_(indexEltNumber) (di->cfsi_m_pool, cfsi_m_ix);
764 /* Top-level place to call to add a CFI summary record. The supplied
765 DiCfSI_m is copied. */
766 void ML_(addDiCfSI) ( struct _DebugInfo* di,
767 Addr base, UInt len, DiCfSI_m* cfsi_m )
769 static const Bool debug = False;
770 UInt new_sz;
771 DiCfSI* new_tab;
772 SSizeT delta;
773 DebugInfoMapping* map;
774 DebugInfoMapping* map2;
776 if (debug) {
777 VG_(printf)("adding DiCfSI: ");
778 ML_(ppDiCfSI)(di->cfsi_exprs, base, len, cfsi_m);
781 /* sanity */
782 vg_assert(len > 0);
783 /* Issue a warning if LEN is unexpectedly large (exceeds 5 million).
784 The implication is you have a single procedure
785 with more than 5 million bytes of code. Which is pretty
786 unlikely. Either that, or the debuginfo reader is somehow
787 broken. 5 million is of course arbitrary; but it's big enough
788 to be bigger than the size of any plausible piece of code that
789 would fall within a single procedure. But occasionally it does
790 happen (c.f. BZ #339542). */
791 if (len >= 5000000)
792 VG_(message)(Vg_DebugMsg,
793 "warning: DiCfSI %#lx .. %#lx is huge; length = %u (%s)\n",
794 base, base + len - 1, len, di->soname);
796 vg_assert(di->fsm.have_rx_map && di->fsm.rw_map_count);
797 /* Find mapping where at least one end of the CFSI falls into. */
798 map = ML_(find_rx_mapping)(di, base, base);
799 map2 = ML_(find_rx_mapping)(di, base + len - 1,
800 base + len - 1);
801 if (map == NULL)
802 map = map2;
803 else if (map2 == NULL)
804 map2 = map;
806 /* Rule out ones which are completely outside the r-x mapped area
807 (or which span across different areas).
808 See "Comment_Regarding_Text_Range_Checks" elsewhere in this file
809 for background and rationale. */
810 if (map == NULL || map != map2) {
811 static Int complaints = 10;
812 if (VG_(clo_trace_cfi) || complaints > 0) {
813 complaints--;
814 if (VG_(clo_verbosity) > 1) {
815 VG_(message)(
816 Vg_DebugMsg,
817 "warning: DiCfSI %#lx .. %#lx outside mapped rx segments (%s)\n",
818 base,
819 base + len - 1,
820 di->soname
823 if (VG_(clo_trace_cfi))
824 ML_(ppDiCfSI)(di->cfsi_exprs, base, len, cfsi_m);
826 return;
829 /* Now we know the range is at least partially inside the r-x
830 mapped area. That implies that at least one of the ends of the
831 range falls inside the area. If necessary, clip it so it is
832 completely within the area. If we don't do this,
833 check_CFSI_related_invariants() in debuginfo.c (invariant #2)
834 will fail. See
835 "Comment_on_IMPORTANT_CFSI_REPRESENTATIONAL_INVARIANTS" in
836 priv_storage.h for background. */
837 if (base < map->avma) {
838 /* Lower end is outside the mapped area. Hence upper end must
839 be inside it. */
840 if (0) VG_(printf)("XXX truncate lower\n");
841 vg_assert(base + len - 1 >= map->avma);
842 delta = (SSizeT)(map->avma - base);
843 vg_assert(delta > 0);
844 vg_assert(delta < (SSizeT)len);
845 base += delta;
846 len -= delta;
848 else
849 if (base + len - 1 > map->avma + map->size - 1) {
850 /* Upper end is outside the mapped area. Hence lower end must be
851 inside it. */
852 if (0) VG_(printf)("XXX truncate upper\n");
853 vg_assert(base <= map->avma + map->size - 1);
854 delta = (SSizeT)( (base + len - 1)
855 - (map->avma + map->size - 1) );
856 vg_assert(delta > 0);
857 vg_assert(delta < (SSizeT)len);
858 len -= delta;
861 /* Final checks */
863 /* Because: either cfsi was entirely inside the range, in which
864 case we asserted that len > 0 at the start, OR it fell partially
865 inside the range, in which case we reduced it by some size
866 (delta) which is < its original size. */
867 vg_assert(len > 0);
869 /* Similar logic applies for the next two assertions. */
870 vg_assert(base >= map->avma);
871 vg_assert(base + len - 1
872 <= map->avma + map->size - 1);
874 if (di->cfsi_used == di->cfsi_size) {
875 new_sz = 2 * di->cfsi_size;
876 if (new_sz == 0) new_sz = 20;
877 new_tab = ML_(dinfo_zalloc)( "di.storage.addDiCfSI.1",
878 new_sz * sizeof(DiCfSI) );
879 if (di->cfsi_rd != NULL) {
880 VG_(memcpy)(new_tab, di->cfsi_rd,
881 di->cfsi_used * sizeof(DiCfSI));
882 ML_(dinfo_free)(di->cfsi_rd);
884 di->cfsi_rd = new_tab;
885 di->cfsi_size = new_sz;
886 if (di->cfsi_m_pool == NULL)
887 di->cfsi_m_pool = VG_(newDedupPA)(1000 * sizeof(DiCfSI_m),
888 vg_alignof(DiCfSI_m),
889 ML_(dinfo_zalloc),
890 "di.storage.DiCfSI_m_pool",
891 ML_(dinfo_free));
894 di->cfsi_rd[di->cfsi_used].base = base;
895 di->cfsi_rd[di->cfsi_used].len = len;
896 di->cfsi_rd[di->cfsi_used].cfsi_m_ix
897 = VG_(allocFixedEltDedupPA)(di->cfsi_m_pool,
898 sizeof(DiCfSI_m),
899 cfsi_m);
900 di->cfsi_used++;
901 vg_assert(di->cfsi_used <= di->cfsi_size);
905 Int ML_(CfiExpr_Undef)( XArray* dst )
907 CfiExpr e;
908 VG_(memset)( &e, 0, sizeof(e) );
909 e.tag = Cex_Undef;
910 return (Int)VG_(addToXA)( dst, &e );
912 Int ML_(CfiExpr_Deref)( XArray* dst, Int ixAddr )
914 CfiExpr e;
915 VG_(memset)( &e, 0, sizeof(e) );
916 e.tag = Cex_Deref;
917 e.Cex.Deref.ixAddr = ixAddr;
918 return (Int)VG_(addToXA)( dst, &e );
920 Int ML_(CfiExpr_Const)( XArray* dst, UWord con )
922 CfiExpr e;
923 VG_(memset)( &e, 0, sizeof(e) );
924 e.tag = Cex_Const;
925 e.Cex.Const.con = con;
926 return (Int)VG_(addToXA)( dst, &e );
928 Int ML_(CfiExpr_Unop)( XArray* dst, CfiUnop op, Int ix )
930 CfiExpr e;
931 VG_(memset)( &e, 0, sizeof(e) );
932 e.tag = Cex_Unop;
933 e.Cex.Unop.op = op;
934 e.Cex.Unop.ix = ix;
935 return (Int)VG_(addToXA)( dst, &e );
937 Int ML_(CfiExpr_Binop)( XArray* dst, CfiBinop op, Int ixL, Int ixR )
939 CfiExpr e;
940 VG_(memset)( &e, 0, sizeof(e) );
941 e.tag = Cex_Binop;
942 e.Cex.Binop.op = op;
943 e.Cex.Binop.ixL = ixL;
944 e.Cex.Binop.ixR = ixR;
945 return (Int)VG_(addToXA)( dst, &e );
947 Int ML_(CfiExpr_CfiReg)( XArray* dst, CfiReg reg )
949 CfiExpr e;
950 VG_(memset)( &e, 0, sizeof(e) );
951 e.tag = Cex_CfiReg;
952 e.Cex.CfiReg.reg = reg;
953 return (Int)VG_(addToXA)( dst, &e );
955 Int ML_(CfiExpr_DwReg)( XArray* dst, Int reg )
957 CfiExpr e;
958 VG_(memset)( &e, 0, sizeof(e) );
959 e.tag = Cex_DwReg;
960 e.Cex.DwReg.reg = reg;
961 return (Int)VG_(addToXA)( dst, &e );
964 static void ppCfiUnop ( CfiUnop op )
966 switch (op) {
967 case Cunop_Abs: VG_(printf)("abs"); break;
968 case Cunop_Neg: VG_(printf)("-"); break;
969 case Cunop_Not: VG_(printf)("~"); break;
970 default: vg_assert(0);
974 static void ppCfiBinop ( CfiBinop op )
976 switch (op) {
977 case Cbinop_Add: VG_(printf)("+"); break;
978 case Cbinop_Sub: VG_(printf)("-"); break;
979 case Cbinop_And: VG_(printf)("&"); break;
980 case Cbinop_Mul: VG_(printf)("*"); break;
981 case Cbinop_Shl: VG_(printf)("<<"); break;
982 case Cbinop_Shr: VG_(printf)(">>"); break;
983 case Cbinop_Eq: VG_(printf)("=="); break;
984 case Cbinop_Ge: VG_(printf)(">="); break;
985 case Cbinop_Gt: VG_(printf)(">"); break;
986 case Cbinop_Le: VG_(printf)("<="); break;
987 case Cbinop_Lt: VG_(printf)("<"); break;
988 case Cbinop_Ne: VG_(printf)("!="); break;
989 default: vg_assert(0);
993 static void ppCfiReg ( CfiReg reg )
995 switch (reg) {
996 case Creg_INVALID: VG_(printf)("Creg_INVALID"); break;
997 case Creg_IA_SP: VG_(printf)("xSP"); break;
998 case Creg_IA_BP: VG_(printf)("xBP"); break;
999 case Creg_IA_IP: VG_(printf)("xIP"); break;
1000 case Creg_ARM_R13: VG_(printf)("R13"); break;
1001 case Creg_ARM_R12: VG_(printf)("R12"); break;
1002 case Creg_ARM_R15: VG_(printf)("R15"); break;
1003 case Creg_ARM_R14: VG_(printf)("R14"); break;
1004 case Creg_ARM_R7: VG_(printf)("R7"); break;
1005 case Creg_ARM64_SP: VG_(printf)("SP"); break;
1006 case Creg_ARM64_X30: VG_(printf)("X30"); break;
1007 case Creg_ARM64_X29: VG_(printf)("X29"); break;
1008 case Creg_MIPS_RA: VG_(printf)("RA"); break;
1009 case Creg_S390_IA: VG_(printf)("IA"); break;
1010 case Creg_S390_SP: VG_(printf)("SP"); break;
1011 case Creg_S390_FP: VG_(printf)("FP"); break;
1012 case Creg_S390_LR: VG_(printf)("LR"); break;
1013 default: vg_assert(0);
1017 void ML_(ppCfiExpr)( const XArray* src, Int ix )
1019 /* VG_(indexXA) checks for invalid src/ix values, so we can
1020 use it indiscriminately. */
1021 const CfiExpr* e = VG_(indexXA)( src, ix );
1022 switch (e->tag) {
1023 case Cex_Undef:
1024 VG_(printf)("Undef");
1025 break;
1026 case Cex_Deref:
1027 VG_(printf)("*(");
1028 ML_(ppCfiExpr)(src, e->Cex.Deref.ixAddr);
1029 VG_(printf)(")");
1030 break;
1031 case Cex_Const:
1032 VG_(printf)("0x%lx", e->Cex.Const.con);
1033 break;
1034 case Cex_Unop:
1035 ppCfiUnop(e->Cex.Unop.op);
1036 VG_(printf)("(");
1037 ML_(ppCfiExpr)(src, e->Cex.Unop.ix);
1038 VG_(printf)(")");
1039 break;
1040 case Cex_Binop:
1041 VG_(printf)("(");
1042 ML_(ppCfiExpr)(src, e->Cex.Binop.ixL);
1043 VG_(printf)(")");
1044 ppCfiBinop(e->Cex.Binop.op);
1045 VG_(printf)("(");
1046 ML_(ppCfiExpr)(src, e->Cex.Binop.ixR);
1047 VG_(printf)(")");
1048 break;
1049 case Cex_CfiReg:
1050 ppCfiReg(e->Cex.CfiReg.reg);
1051 break;
1052 case Cex_DwReg:
1053 VG_(printf)("dwr%d", e->Cex.DwReg.reg);
1054 break;
1055 default:
1056 VG_(core_panic)("ML_(ppCfiExpr)");
1057 /*NOTREACHED*/
1058 break;
1063 Word ML_(cmp_for_DiAddrRange_range) ( const void* keyV,
1064 const void* elemV ) {
1065 const Addr* key = (const Addr*)keyV;
1066 const DiAddrRange* elem = (const DiAddrRange*)elemV;
1067 if (0)
1068 VG_(printf)("cmp_for_DiAddrRange_range: %#lx vs %#lx\n",
1069 *key, elem->aMin);
1070 if ((*key) < elem->aMin) return -1;
1071 if ((*key) > elem->aMax) return 1;
1072 return 0;
1075 static
1076 void show_scope ( OSet* /* of DiAddrRange */ scope, const HChar* who )
1078 DiAddrRange* range;
1079 VG_(printf)("Scope \"%s\" = {\n", who);
1080 VG_(OSetGen_ResetIter)( scope );
1081 while (True) {
1082 range = VG_(OSetGen_Next)( scope );
1083 if (!range) break;
1084 VG_(printf)(" %#lx .. %#lx: %ld vars\n", range->aMin, range->aMax,
1085 range->vars ? VG_(sizeXA)(range->vars) : 0);
1087 VG_(printf)("}\n");
/* Add the variable 'var' to 'scope' for the address range [aMin,aMax]
   (inclusive of aMin and aMax).  Split existing ranges as required if
   aMin or aMax or both don't match existing range boundaries, and add
   'var' to all required ranges.  Take great care to preserve the
   invariant that the ranges in 'scope' cover the entire address range
   exactly once, with no overlaps and no holes.
   'scope' is an OSet of DiAddrRange keyed on aMin (see the
   OSetGen_Create call in ML_(addVar)). */
static void add_var_to_arange (
               /*MOD*/OSet* /* of DiAddrRange */ scope,
               Addr aMin,
               Addr aMax,
               DiVariable* var
            )
{
   DiAddrRange *first, *last, *range;
   /* These xx variables are for assertion checking only; they don't
      contribute anything to the actual work of this function. */
   DiAddrRange *xxRangep, *xxFirst, *xxLast;
   UWord       xxIters;

   vg_assert(aMin <= aMax);

   if (0) VG_(printf)("add_var_to_arange: %#lx .. %#lx\n", aMin, aMax);
   if (0) show_scope( scope, "add_var_to_arange(1)" );

   /* See if the lower end of the range (aMin) falls exactly on an
      existing range boundary.  If not, find the range it does fall
      into, and split it (copying the variables in the process), so
      that aMin does exactly fall on a range boundary. */
   first = VG_(OSetGen_Lookup)( scope, &aMin );
   /* It must be present, since the presented OSet must cover
      the entire address range. */
   vg_assert(first);
   vg_assert(first->aMin <= first->aMax);
   vg_assert(first->aMin <= aMin && aMin <= first->aMax);

   /* Fast track common case, which is that the range specified for
      the variable exactly coincides with one already-existing
      range. */
   if (first->aMin == aMin && first->aMax == aMax) {
      vg_assert(first->vars);
      VG_(addToXA)( first->vars, var );
      return;
   }

   /* We have to get into splitting ranges, which is complex
      and slow. */
   if (first->aMin < aMin) {
      DiAddrRange* nyu;
      /* Ok.  We'll have to split 'first'. */
      /* truncate the upper end of 'first' */
      Addr tmp = first->aMax;
      first->aMax = aMin-1;
      vg_assert(first->aMin <= first->aMax);
      /* create a new range covering [aMin, old first->aMax] */
      nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
      nyu->aMin = aMin;
      nyu->aMax = tmp;
      vg_assert(nyu->aMin <= nyu->aMax);
      /* copy vars into it, so both halves carry the same var set */
      vg_assert(first->vars);
      nyu->vars = VG_(cloneXA)( "di.storage.avta.1", first->vars );
      VG_(OSetGen_Insert)( scope, nyu );
      first = nyu;
   }

   vg_assert(first->aMin == aMin);

   /* Now do exactly the same for the upper end (aMax): if it doesn't
      fall on a boundary, cause it to do so by splitting the range it
      does currently fall into. */
   last = VG_(OSetGen_Lookup)( scope, &aMax );
   vg_assert(last->aMin <= last->aMax);
   vg_assert(last->aMin <= aMax && aMax <= last->aMax);

   if (aMax < last->aMax) {
      DiAddrRange* nyu;
      /* We have to split 'last'. */
      /* truncate the lower end of 'last' */
      Addr tmp = last->aMin;
      last->aMin = aMax+1;
      vg_assert(last->aMin <= last->aMax);
      /* create a new range covering [old last->aMin, aMax] */
      nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
      nyu->aMin = tmp;
      nyu->aMax = aMax;
      vg_assert(nyu->aMin <= nyu->aMax);
      /* copy vars into it */
      vg_assert(last->vars);
      nyu->vars = VG_(cloneXA)( "di.storage.avta.2", last->vars );
      VG_(OSetGen_Insert)( scope, nyu );
      last = nyu;
   }

   vg_assert(aMax == last->aMax);

   /* Re-check via fresh lookups that both endpoints now sit exactly
      on range boundaries. */
   xxFirst = (DiAddrRange*)VG_(OSetGen_Lookup)(scope, &aMin);
   xxLast  = (DiAddrRange*)VG_(OSetGen_Lookup)(scope, &aMax);
   vg_assert(xxFirst);
   vg_assert(xxLast);
   vg_assert(xxFirst->aMin == aMin);
   vg_assert(xxLast->aMax == aMax);
   if (xxFirst != xxLast)
      vg_assert(xxFirst->aMax < xxLast->aMin);

   /* Great.  Now we merely need to iterate over the segments from
      'first' to 'last' inclusive, and add 'var' to the variable set
      of each of them. */
   if (0) {
      static UWord ctr = 0;
      ctr++;
      VG_(printf)("ctr = %lu\n", ctr);
      if (ctr >= 33263) show_scope( scope, "add_var_to_arange(2)" );
   }

   xxIters = 0;
   range = xxRangep = NULL;
   VG_(OSetGen_ResetIterAt)( scope, &aMin );
   while (True) {
      xxRangep = range;
      range    = VG_(OSetGen_Next)( scope );
      if (!range) break;
      if (range->aMin > aMax) break;
      xxIters++;
      if (0) VG_(printf)("have range %#lx %#lx\n",
                         range->aMin, range->aMax);

      /* Sanity checks: the visited ranges must be contiguous,
         starting exactly at aMin. */
      if (!xxRangep) {
         /* This is the first in the range */
         vg_assert(range->aMin == aMin);
      } else {
         vg_assert(xxRangep->aMax + 1 == range->aMin);
      }

      vg_assert(range->vars);
      VG_(addToXA)( range->vars, var );
   }
   /* Done.  We should have seen at least one range. */
   vg_assert(xxIters >= 1);
   if (xxIters == 1) vg_assert(xxFirst == xxLast);
   if (xxFirst == xxLast) vg_assert(xxIters == 1);
   vg_assert(xxRangep);
   vg_assert(xxRangep->aMax == aMax);
   vg_assert(xxRangep == xxLast);
}
/* Top-level place to call to add a variable description (as extracted
   from a DWARF3 .debug_info section). */
void ML_(addVar)( struct _DebugInfo* di,
                  Int    level,
                  Addr   aMin,
                  Addr   aMax,
                  const  HChar* name, /* in di's .strpool */
                  UWord  typeR, /* a cuOff */
                  const GExpr* gexpr,
                  const GExpr* fbGX,
                  UInt   fndn_ix, /* where decl'd - may be zero.
                                     index in in di's .fndnpool */
                  Int    lineNo, /* where decl'd - may be zero */
                  Bool   show /* currently unreferenced in this body */ )
{
   OSet* /* of DiAddrRange */ scope;
   DiVariable var;
   Bool       all;
   TyEnt*     ent;
   MaybeULong mul;
   const HChar* badness;

   vg_assert(di && di->admin_tyents);

   /* Optional debug dump of the incoming variable description. */
   if (0) {
      VG_(printf)("  ML_(addVar): level %d  %#lx-%#lx  %s :: ",
                  level, aMin, aMax, name );
      ML_(pp_TyEnt_C_ishly)( di->admin_tyents, typeR );
      VG_(printf)("\n  Var=");
      ML_(pp_GX)(gexpr);
      VG_(printf)("\n");
      if (fbGX) {
         VG_(printf)("  FrB=");
         ML_(pp_GX)( fbGX );
         VG_(printf)("\n");
      } else {
         VG_(printf)("  FrB=none\n");
      }
      VG_(printf)("\n");
   }

   vg_assert(level >= 0);
   vg_assert(aMin <= aMax);
   vg_assert(name);
   vg_assert(gexpr);

   /* typeR must name a known type entity. */
   ent = ML_(TyEnts__index_by_cuOff)( di->admin_tyents, NULL, typeR);
   vg_assert(ent);
   vg_assert(ML_(TyEnt__is_type)(ent));

   /* "Comment_Regarding_Text_Range_Checks" (is referred to elsewhere)
      ----------------------------------------------------------------
      Ignore any variables whose aMin .. aMax (that is, range of text
      addresses for which they actually exist) falls outside the text
      segment.  Is this indicative of a bug in the reader?  Maybe.
      (LATER): instead of restricting strictly to the .text segment,
      be a bit more relaxed, and accept any variable whose text range
      falls inside the r-x mapped area.  This is useful because .text
      is not always the only instruction-carrying segment: others are:
      .init .plt __libc_freeres_fn and .fini.  This implicitly assumes
      that those extra sections have the same bias as .text, but that
      seems a reasonable assumption to me. */
   /* This is assured us by top level steering logic in debuginfo.c,
      and it is re-checked at the start of ML_(read_elf_object). */
   vg_assert(di->fsm.have_rx_map && di->fsm.rw_map_count);
   if (level > 0 && ML_(find_rx_mapping)(di, aMin, aMax) == NULL) {
      if (VG_(clo_verbosity) > 1) {
         VG_(message)(Vg_DebugMsg, 
            "warning: addVar: in range %#lx .. %#lx outside "
            "all rx mapped areas (%s)\n",
            aMin, aMax, name
         );
      }
      return;
   }

   /* If the type's size is zero (which can mean unknown size), ignore
      it.  We will never be able to actually relate a data address to
      a data object with zero size, so there's no point in storing
      info on it.  On 32-bit platforms, also reject types whose size
      is 2^32 bytes or large.  (It's amazing what junk shows up ..) */
   mul = ML_(sizeOfType)(di->admin_tyents, typeR);

   badness = NULL;
   if (mul.b != True) 
      badness = "unknown size";
   else if (mul.ul == 0)
      badness = "zero size   ";
   else if (sizeof(void*) == 4 && mul.ul >= (1ULL<<32))
      badness = "implausibly large";

   if (badness) {
      /* Rate-limited complaint; at most 10 messages per run. */
      static Int complaints = 10;
      if (VG_(clo_verbosity) >= 2 && complaints > 0) {
         VG_(message)(Vg_DebugMsg, "warning: addVar: %s (%s)\n",
                                   badness, name );
         complaints--;
      }
      return;
   }

   /* Lazily create the top-level array of per-level scopes. */
   if (!di->varinfo) {
      di->varinfo = VG_(newXA)( ML_(dinfo_zalloc),
                                "di.storage.addVar.1",
                                ML_(dinfo_free),
                                sizeof(OSet*) );
   }

   vg_assert(level < 256); /* arbitrary; stay sane */
   /* Expand the top level array enough to map this level */
   while ( VG_(sizeXA)(di->varinfo) <= level ) {
      DiAddrRange* nyu;
      scope = VG_(OSetGen_Create)( offsetof(DiAddrRange,aMin),
                                   ML_(cmp_for_DiAddrRange_range),
                                   ML_(dinfo_zalloc), "di.storage.addVar.2",
                                   ML_(dinfo_free) );
      if (0) VG_(printf)("create: scope = %p, adding at %ld\n",
                         scope, VG_(sizeXA)(di->varinfo));
      VG_(addToXA)( di->varinfo, &scope );
      /* Add a single range covering the entire address space.  At
         level 0 we require this doesn't get split.  At levels above 0
         we require that any additions to it cause it to get split.
         All of these invariants get checked both add_var_to_arange
         and after reading is complete, in canonicaliseVarInfo. */
      nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
      nyu->aMin = (Addr)0;
      nyu->aMax = ~(Addr)0;
      nyu->vars = VG_(newXA)( ML_(dinfo_zalloc), "di.storage.addVar.3",
                              ML_(dinfo_free),
                              sizeof(DiVariable) );
      VG_(OSetGen_Insert)( scope, nyu );
   }

   vg_assert( VG_(sizeXA)(di->varinfo) > level );
   scope = *(OSet**)VG_(indexXA)( di->varinfo, level );
   vg_assert(scope);

   /* Build the variable record and hand it to the range machinery. */
   var.name     = name;
   var.typeR    = typeR;
   var.gexpr    = gexpr;
   var.fbGX     = fbGX;
   var.fndn_ix  = fndn_ix;
   var.lineNo   = lineNo;

   all = aMin == (Addr)0 && aMax == ~(Addr)0;
   /* Global scope (level 0) must span everything; inner scopes must not. */
   vg_assert(level == 0 ? all : !all);

   add_var_to_arange( /*MOD*/scope, aMin, aMax, &var );
}
/* This really just checks the constructed data structure, as there is
   no canonicalisation to do: every scope must partition the whole
   address space into contiguous, non-overlapping ranges. */
static void canonicaliseVarInfo ( struct _DebugInfo* di )
{
   Word i, nInThisScope;

   if (!di->varinfo)
      return;

   for (i = 0; i < VG_(sizeXA)(di->varinfo); i++) {

      DiAddrRange *range, *rangep;
      OSet* scope = *(OSet**)VG_(indexXA)(di->varinfo, i);
      if (!scope) continue;

      /* Deal with the global-scope case: exactly one range, covering
         the entire address space. */
      if (i == 0) {
         Addr zero = 0;
         vg_assert(VG_(OSetGen_Size)( scope ) == 1);
         range = VG_(OSetGen_Lookup)( scope, &zero );
         vg_assert(range);
         vg_assert(range->aMin == (Addr)0);
         vg_assert(range->aMax == ~(Addr)0);
         continue;
      }

      /* All the rest of this is for the local-scope case. */
      /* iterate over all entries in 'scope' */
      nInThisScope = 0;
      rangep = NULL;  /* previously-seen range, for contiguity checks */
      VG_(OSetGen_ResetIter)(scope);
      while (True) {
         range = VG_(OSetGen_Next)(scope);
         if (!range) {
            /* We just saw the last one.  There must have been at
               least one entry in the range, and it must end at the
               top of the address space. */
            vg_assert(rangep);
            vg_assert(rangep->aMax == ~(Addr)0);
            break;
         }

         vg_assert(range->aMin <= range->aMax);
         vg_assert(range->vars);

         if (!rangep) {
            /* This is the first entry in the range; it must start
               at address zero. */
            vg_assert(range->aMin == 0);
         } else {
            /* Ranges must butt up against each other with no gap. */
            vg_assert(rangep->aMax + 1 == range->aMin);
         }

         rangep = range;
         nInThisScope++;
      } /* iterating over ranges in a given scope */

      /* If there's only one entry in this (local) scope, it must
         cover the entire address space (obviously), but it must not
         contain any vars. */

      vg_assert(nInThisScope > 0);
      if (nInThisScope == 1) {
         Addr zero = 0;
         vg_assert(VG_(OSetGen_Size)( scope ) == 1);
         range = VG_(OSetGen_Lookup)( scope, &zero );
         vg_assert(range);
         vg_assert(range->aMin == (Addr)0);
         vg_assert(range->aMax == ~(Addr)0);
         vg_assert(range->vars);
         vg_assert(VG_(sizeXA)(range->vars) == 0);
      }

   } /* iterate over scopes */
}
1463 /*------------------------------------------------------------*/
1464 /*--- Canonicalisers ---*/
1465 /*------------------------------------------------------------*/
1467 /* Sort the symtab by starting address, and emit warnings if any
1468 symbols have overlapping address ranges. We use that old chestnut,
1469 shellsort. Mash the table around so as to establish the property
1470    that addresses are in order and the ranges do not overlap.  This
1471 facilitates using binary search to map addresses to symbols when we
1472 come to query the table.
1474 static Int compare_DiSym ( const void* va, const void* vb )
1476 const DiSym* a = va;
1477 const DiSym* b = vb;
1478 if (a->avmas.main < b->avmas.main) return -1;
1479 if (a->avmas.main > b->avmas.main) return 1;
1480 return 0;
1484 /* An address is associated with more than one name. Which do we
1485 prefer as the "display" name (that we show the user in stack
1486 traces)? In order:
1488 - Prefer "PMPI_<foo>" over "MPI_<foo>".
1490 - Else, prefer a non-empty name over an empty one.
1492 - Else, prefer a non-whitespace name over an all-whitespace name.
1494 - Else, prefer the shorter symbol name. If the symbol contains a
1495 version symbol ('@' on Linux, other platforms may differ), which means it
1496 is versioned, then the length up to the version symbol is used for length
1497 comparison purposes (so "foo@GLIBC_2.4.2" is considered shorter than
1498 "foobar").
1500 - Else, if two symbols have the same length, prefer a versioned symbol over
1501 a non-versioned symbol.
1503 - Else, use alphabetical ordering.
1505 - Otherwise, they must be the same; use the name with the lower address.
1507 Very occasionally this goes wrong (eg. 'memcmp' and 'bcmp' are
1508 aliases in glibc, we choose the 'bcmp' symbol because it's shorter,
1509 so we can misdescribe memcmp() as bcmp()). This is hard to avoid.
1510 It's mentioned in the FAQ file.
1512 Returned value is True if a_name is preferred, False if b_name is
1513 preferred.
/* Decide, by the rules documented above, whether a_name (True) or
   b_name (False) should be the display name for the symbol at
   sym_avma.  Exactly one of the goto-out cases, or the same-name
   fallthrough, decides the answer. */
static
Bool preferName ( const DebugInfo* di,
                  const HChar* a_name, const HChar* b_name,
                  Addr sym_avma/*exposition only*/ )
{
   Word cmp;
   Word vlena, vlenb;		/* length without version */
   const HChar *vpa, *vpb;      /* position of version char, if any */

   Bool preferA = False;
   Bool preferB = False;

   vg_assert(a_name);
   vg_assert(b_name);
   // vg_assert(a_name != b_name);
   // ???? now the pointers can be equal but is that
   // ???? not the indication of a latent bug ????

   vlena = VG_(strlen)(a_name);
   vlenb = VG_(strlen)(b_name);

#  if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_freebsd)
#    define VERSION_CHAR '@'
#  elif defined(VGO_darwin)
#    define VERSION_CHAR '$'
#  else
#    error Unknown OS
#  endif

   vpa = VG_(strchr)(a_name, VERSION_CHAR);
   vpb = VG_(strchr)(b_name, VERSION_CHAR);

#  undef VERSION_CHAR

   /* For length comparisons, only count up to the version marker. */
   if (vpa)
      vlena = vpa - a_name;
   if (vpb)
      vlenb = vpb - b_name;

   /* MPI hack: prefer PMPI_Foo over MPI_Foo */
   if (0==VG_(strncmp)(a_name, "MPI_", 4)
       && 0==VG_(strncmp)(b_name, "PMPI_", 5)
       && 0==VG_(strcmp)(a_name, 1+b_name)) {
      preferB = True; goto out;
   } 
   if (0==VG_(strncmp)(b_name, "MPI_", 4)
       && 0==VG_(strncmp)(a_name, "PMPI_", 5)
       && 0==VG_(strcmp)(b_name, 1+a_name)) {
      preferA = True; goto out;
   }

   /* Prefer non-empty name. */
   if (vlena && !vlenb) {
      preferA = True; goto out;
   }
   if (vlenb && !vlena) {
      preferB = True; goto out;
   }

   /* Prefer non-whitespace name. */
   {
      Bool blankA = True;
      Bool blankB = True;
      const HChar *s;
      s = a_name;
      while (*s) {
         if (!VG_(isspace)(*s++)) {
            blankA = False;
            break;
         }
      }
      s = b_name;
      while (*s) {
         if (!VG_(isspace)(*s++)) {
            blankB = False;
            break;
         }
      }

      if (!blankA && blankB) {
         preferA = True; goto out;
      }
      if (!blankB && blankA) {
         preferB = True; goto out;
      }
   }

   /* Select the shortest unversioned name */
   if (vlena < vlenb) {
      preferA = True; goto out;
   }
   if (vlenb < vlena) {
      preferB = True; goto out;
   }

   /* Equal lengths; select the versioned name */
   if (vpa && !vpb) {
      preferA = True; goto out;
   }
   if (vpb && !vpa) {
      preferB = True; goto out;
   }

   /* Either both versioned or neither is versioned; select them
      alphabetically */
   cmp = VG_(strcmp)(a_name, b_name);
   if (cmp < 0) {
      preferA = True; goto out;
   }
   if (cmp > 0) {
      preferB = True; goto out;
   }

   /* If we get here, they are the same name. */

   /* In this case we could choose either (arbitrarily), but might as
      well choose the one with the lowest DiSym* address, so as to try
      and make the comparison mechanism more stable (a la sorting
      parlance).  Also, skip the diagnostic printing in this case. */
   return a_name <= b_name  ? True  : False;

   /*NOTREACHED*/
   vg_assert(0);
  out:
   if (preferA && !preferB) {
      TRACE_SYMTAB("sym at %#lx: prefer '%s' to '%s'\n",
                   sym_avma, a_name, b_name );
      return True;
   }
   if (preferB && !preferA) {
      TRACE_SYMTAB("sym at %#lx: prefer '%s' to '%s'\n",
                   sym_avma, b_name, a_name );
      return False;
   }
   /*NOTREACHED*/
   vg_assert(0);
}
1654 /* Add the names in FROM to the names in TO. */
1655 static
1656 void add_DiSym_names_to_from ( const DebugInfo* di, DiSym* to,
1657 const DiSym* from )
1659 vg_assert(to->pri_name);
1660 vg_assert(from->pri_name);
1661 /* Figure out how many names there will be in the new combined
1662 secondary vector. */
1663 const HChar** to_sec = to->sec_names;
1664 const HChar** from_sec = from->sec_names;
1665 Word n_new_sec = 1;
1666 if (from_sec) {
1667 while (*from_sec) {
1668 n_new_sec++;
1669 from_sec++;
1672 if (to_sec) {
1673 while (*to_sec) {
1674 n_new_sec++;
1675 to_sec++;
1678 if (0)
1679 TRACE_SYMTAB("merge: -> %ld\n", n_new_sec);
1680 /* Create the new sec and copy stuff into it, putting the new
1681 entries at the end. */
1682 const HChar** new_sec = ML_(dinfo_zalloc)( "di.storage.aDntf.1",
1683 (n_new_sec+1) * sizeof(HChar*) );
1684 from_sec = from->sec_names;
1685 to_sec = to->sec_names;
1686 Word i = 0;
1687 if (to_sec) {
1688 while (*to_sec) {
1689 new_sec[i++] = *to_sec;
1690 to_sec++;
1693 new_sec[i++] = from->pri_name;
1694 if (from_sec) {
1695 while (*from_sec) {
1696 new_sec[i++] = *from_sec;
1697 from_sec++;
1700 vg_assert(i == n_new_sec);
1701 vg_assert(new_sec[i] == NULL);
1702 /* If we're replacing an existing secondary vector, free it. */
1703 if (to->sec_names) {
1704 ML_(dinfo_free)(to->sec_names);
1706 to->sec_names = new_sec;
/* Canonicalise the symbol table: sort by address, merge entries with
   identical address ranges (keeping all names), truncate overlapping
   ranges, then choose each symbol's preferred display name. */
static void canonicaliseSymtab ( struct _DebugInfo* di )
{
   Word  i, j, n_truncated;
   Addr  sta1, sta2, end1, end2, toc1, toc2;
   const HChar *pri1, *pri2, **sec1, **sec2;
   Bool  ist1, ist2, isf1, isf2, isg1, isg2;

#  define SWAP(ty,aa,bb) \
      do { ty tt = (aa); (aa) = (bb); (bb) = tt; } while (0)

   if (di->symtab_used == 0)
      return;

   /* Check initial invariants */
   for (i = 0; i < di->symtab_used; i++) {
      DiSym* sym = &di->symtab[i];
      vg_assert(sym->pri_name);
   }

   /* Sort by address. */
   VG_(ssort)(di->symtab, di->symtab_used,
              sizeof(*di->symtab), compare_DiSym);

  cleanup_more:

   /* BEGIN Detect and "fix" identical address ranges. */
   while (1) {
      Word r, w, n_merged;
      n_merged = 0;
      w = 0;
      /* A pass merging entries together in the case where they agree
         on .isText -- that is, either: both are .isText or both are
         not .isText.  They are merged into a single entry, but both
         sets of names are preserved, so we end up knowing all the
         names for that particular address range.*/
      for (r = 1; r < di->symtab_used; r++) {
         vg_assert(w < r);
         if (   di->symtab[w].avmas.main == di->symtab[r].avmas.main
             && di->symtab[w].size == di->symtab[r].size
             && !!di->symtab[w].isText == !!di->symtab[r].isText) {
            /* merge the two into one */
            n_merged++;
            /* Add r names to w if r has secondary names
               or r and w primary names differ. */
            if (di->symtab[r].sec_names
                || (0 != VG_(strcmp)(di->symtab[r].pri_name,
                                     di->symtab[w].pri_name))) {
               add_DiSym_names_to_from(di, &di->symtab[w], &di->symtab[r]);
            }
            /* mark w as an IFunc if either w or r are */
            di->symtab[w].isIFunc = di->symtab[w].isIFunc || di->symtab[r].isIFunc;
            /* likewise for global symbols */
            di->symtab[w].isGlobal = di->symtab[w].isGlobal || di->symtab[r].isGlobal;
            /* and use ::pri_names to indicate this slot is no longer in use */
            di->symtab[r].pri_name = NULL;
            if (di->symtab[r].sec_names) {
               ML_(dinfo_free)(di->symtab[r].sec_names);
               di->symtab[r].sec_names = NULL;
            }
            /* Completely zap the entry -- paranoia to make it more
               likely we'll notice if we inadvertently use it
               again. */
            VG_(memset)(&di->symtab[r], 0, sizeof(DiSym));
         } else {
            w = r;
         }
      }

      /* A second pass merging entries together where one .isText but
         the other isn't.  In such cases, just ignore the non-.isText
         one (a heuristic hack.) */
      for (r = 1; r < di->symtab_used; r++) {
         /* Either of the symbols might already have been zapped by
            the previous pass, so we first have to check that. */
         if (di->symtab[r-1].pri_name == NULL) continue;
         if (di->symtab[r-0].pri_name == NULL) continue;
         /* ok, they are both still valid.  Identical address ranges? */
         if (di->symtab[r-1].avmas.main != di->symtab[r-0].avmas.main) continue;
         if (di->symtab[r-1].size != di->symtab[r-0].size) continue;
         /* Identical address ranges.  They must disagree on .isText
            since if they agreed, the previous pass would have merged
            them. */
         if (di->symtab[r-1].isText && di->symtab[r-0].isText) vg_assert(0);
         if (!di->symtab[r-1].isText && !di->symtab[r-0].isText) vg_assert(0);
         Word to_zap  = di->symtab[r-1].isText ? (r-0) : (r-1);
         Word to_keep = di->symtab[r-1].isText ? (r-1) : (r-0);
         vg_assert(!di->symtab[to_zap].isText);
         vg_assert(di->symtab[to_keep].isText);
         /* Add to_zap's names to to_keep if to_zap has secondary names
            or to_zap's and to_keep's primary names differ. */
         if (di->symtab[to_zap].sec_names
             || (0 != VG_(strcmp)(di->symtab[to_zap].pri_name,
                                  di->symtab[to_keep].pri_name))) {
            add_DiSym_names_to_from(di, &di->symtab[to_keep],
                                        &di->symtab[to_zap]);
         }
         /* Mark to_zap as not-in use in the same way as in the
            previous loop. */
         di->symtab[to_zap].pri_name = NULL;
         if (di->symtab[to_zap].sec_names) {
            ML_(dinfo_free)(di->symtab[to_zap].sec_names);
            di->symtab[to_zap].sec_names = NULL;
         }
         VG_(memset)(&di->symtab[to_zap], 0, sizeof(DiSym));
         n_merged++;
      }

      TRACE_SYMTAB( "canonicaliseSymtab: %ld symbols merged\n", n_merged);
      if (n_merged == 0)
         break;
      /* Now a pass to squeeze out any unused ones */
      w = 0;
      for (r = 0; r < di->symtab_used; r++) {
         vg_assert(w <= r);
         if (di->symtab[r].pri_name == NULL)
            continue;
         if (w < r) {
            di->symtab[w] = di->symtab[r];
         }
         w++;
      }
      vg_assert(w + n_merged == di->symtab_used);
      di->symtab_used = w;
   } /* while (1) */
   /* END Detect and "fix" identical address ranges. */

   /* BEGIN Detect and "fix" overlapping address ranges. */
   n_truncated = 0;

   for (i = 0; i < ((Word)di->symtab_used) -1; i++) {

      vg_assert(di->symtab[i].avmas.main <= di->symtab[i+1].avmas.main);

      /* Check for common (no overlap) case. */
      if (di->symtab[i].avmas.main + di->symtab[i].size
          <= di->symtab[i+1].avmas.main)
         continue;

      /* There's an overlap.  Truncate one or the other. */
      if (di->trace_symtab) {
         VG_(printf)("overlapping address ranges in symbol table\n\t");
         ML_(ppSym)( i, &di->symtab[i] );
         VG_(printf)("\t");
         ML_(ppSym)( i+1, &di->symtab[i+1] );
         VG_(printf)("\n");
      }

      /* Truncate one or the other. */
      sta1 = di->symtab[i].avmas.main;
      end1 = sta1 + di->symtab[i].size - 1;
      toc1 = GET_TOCPTR_AVMA(di->symtab[i].avmas);
      // aren't we missing local_ep here ????
      pri1 = di->symtab[i].pri_name;
      sec1 = di->symtab[i].sec_names;
      ist1 = di->symtab[i].isText;
      isf1 = di->symtab[i].isIFunc;
      isg1 = di->symtab[i].isGlobal;

      sta2 = di->symtab[i+1].avmas.main;
      end2 = sta2 + di->symtab[i+1].size - 1;
      toc2 = GET_TOCPTR_AVMA(di->symtab[i+1].avmas);
      // aren't we missing local_ep here ????
      pri2 = di->symtab[i+1].pri_name;
      sec2 = di->symtab[i+1].sec_names;
      ist2 = di->symtab[i+1].isText;
      isf2 = di->symtab[i+1].isIFunc;
      isg2 = di->symtab[i+1].isGlobal;

      if (sta1 < sta2) {
         /* Different starts: chop entry 1 just before entry 2. */
         end1 = sta2 - 1;
      } else {
         vg_assert(sta1 == sta2);
         if (end1 > end2) {
            /* Same start, 1 extends past 2: keep the tail as entry 2. */
            sta1 = end2 + 1;
            SWAP(Addr,sta1,sta2); SWAP(Addr,end1,end2); SWAP(Addr,toc1,toc2);
            SWAP(const HChar*,pri1,pri2); SWAP(const HChar**,sec1,sec2);
            SWAP(Bool,ist1,ist2); SWAP(Bool,isf1,isf2); SWAP(Bool, isg1, isg2);
         } else 
         if (end1 < end2) {
            sta2 = end1 + 1;
         } else {
            /* end1 == end2.  Identical addr ranges.  We'll eventually wind
               up back at cleanup_more, which will take care of it. */
         }
      }
      di->symtab[i].avmas.main = sta1;
      di->symtab[i].size       = end1 - sta1 + 1;
      SET_TOCPTR_AVMA(di->symtab[i].avmas, toc1);
      // missing local_ep ???
      di->symtab[i].pri_name  = pri1;
      di->symtab[i].sec_names = sec1;
      di->symtab[i].isText    = ist1;
      di->symtab[i].isIFunc   = isf1;
      di->symtab[i].isGlobal  = isg1;

      di->symtab[i+1].avmas.main = sta2;
      di->symtab[i+1].size       = end2 - sta2 + 1;
      SET_TOCPTR_AVMA(di->symtab[i+1].avmas, toc2);
      // missing local_ep ???
      di->symtab[i+1].pri_name  = pri2;
      di->symtab[i+1].sec_names = sec2;
      di->symtab[i+1].isText    = ist2;
      di->symtab[i+1].isIFunc   = isf2;
      di->symtab[i+1].isGlobal  = isg2;

      vg_assert(sta1 <= sta2);
      vg_assert(di->symtab[i].size > 0);
      vg_assert(di->symtab[i+1].size > 0);
      /* It may be that the i+1 entry now needs to be moved further
         along to maintain the address order requirement. */
      j = i+1;
      while (j < ((Word)di->symtab_used)-1 
             && di->symtab[j].avmas.main > di->symtab[j+1].avmas.main) {
         SWAP(DiSym,di->symtab[j],di->symtab[j+1]);
         j++;
      }
      n_truncated++;
   }
   /* END Detect and "fix" overlapping address ranges. */

   if (n_truncated > 0) goto cleanup_more;

   /* Ensure relevant postconditions hold. */
   for (i = 0; i < ((Word)di->symtab_used)-1; i++) {
      /* No zero-sized symbols. */
      vg_assert(di->symtab[i].size > 0);
      /* In order. */
      vg_assert(di->symtab[i].avmas.main < di->symtab[i+1].avmas.main);
      /* No overlaps. */
      vg_assert(di->symtab[i].avmas.main + di->symtab[i].size - 1
                < di->symtab[i+1].avmas.main);
      /* Names are sane(ish) */
      vg_assert(di->symtab[i].pri_name);
      if (di->symtab[i].sec_names) {
         vg_assert(di->symtab[i].sec_names[0]);
      }
   }

   /* For each symbol that has more than one name, use preferName to
      select the primary name.  This is a complete kludge in that
      doing it properly requires making a total ordering on the
      candidate names, whilst what we have to work with is an ad-hoc
      binary relation (preferName) that certainly doesn't have the
      relevant transitivity etc properties that are needed to induce a
      legitimate total order.  Doesn't matter though if it doesn't
      always work right since this is only used to generate names to
      show the user. */
   /* NOTE(review): the "-1" bound below means the LAST symtab entry
      never has its preferred name computed; the postcondition loop
      above genuinely needs "-1" (it reads [i+1]) but this one does
      not -- looks like a copy-paste of the bound; verify upstream. */
   for (i = 0; i < ((Word)di->symtab_used)-1; i++) {
      DiSym*  sym = &di->symtab[i];
      const HChar** sec = sym->sec_names;
      if (!sec)
         continue;
      /* Slow but simple.  Copy all the cands into a temp array,
         choose the primary name, and copy them all back again. */
      Word n_tmp = 1;
      while (*sec) { n_tmp++; sec++; }
      j = 0;
      const HChar** tmp = ML_(dinfo_zalloc)( "di.storage.cS.1",
                                             (n_tmp+1) * sizeof(HChar*) );
      tmp[j++] = sym->pri_name;
      sec = sym->sec_names;
      while (*sec) { tmp[j++] = *sec; sec++; }
      vg_assert(j == n_tmp);
      vg_assert(tmp[n_tmp] == NULL); /* because of zalloc */
      /* Choose the most favoured. */
      Word best = 0;
      for (j = 1; j < n_tmp; j++) {
         if (preferName(di, tmp[best], tmp[j], di->symtab[i].avmas.main)) {
            /* best is unchanged */
         } else {
            best = j;
         }
      }
      vg_assert(best >= 0 && best < n_tmp);
      /* Copy back */
      sym->pri_name = tmp[best];
      const HChar** cursor = sym->sec_names;
      for (j = 0; j < n_tmp; j++) {
         if (j == best)
            continue;
         *cursor = tmp[j];
         cursor++;
      }
      vg_assert(*cursor == NULL);
      ML_(dinfo_free)( tmp );
   }

#  undef SWAP
}
2001 static DiLoc* sorting_loctab = NULL;
2002 static Int compare_DiLoc_via_ix ( const void* va, const void* vb )
2004 const DiLoc* a = &sorting_loctab[*(const UInt*)va];
2005 const DiLoc* b = &sorting_loctab[*(const UInt*)vb];
2006 if (a->addr < b->addr) return -1;
2007 if (a->addr > b->addr) return 1;
2008 return 0;
static void sort_loctab_and_loctab_fndn_ix (struct _DebugInfo* di )
{
   /* We have to sort the array loctab by addr
      together with its "parallel" array loctab_fndn_ix.
      We first build sort_ix : an array of indexes in loctab,
      that we sort by loctab address.  Then we can reorder both
      arrays according to sort_ix. */
   UInt *sort_ix = ML_(dinfo_zalloc)("di.storage.six",
                                     di->loctab_used*sizeof(UInt));
   Word i, j, k;

   for (i = 0; i < di->loctab_used; i++) sort_ix[i] = i;
   /* compare_DiLoc_via_ix reads loctab through this file-static;
      clear it again as soon as the sort is done. */
   sorting_loctab = di->loctab;
   VG_(ssort)(sort_ix, di->loctab_used,
              sizeof(*sort_ix), compare_DiLoc_via_ix);
   sorting_loctab = NULL;

   // Permute in place, using the sort_ix.
   // Standard cycle-following permutation: each outer iteration
   // rotates one whole cycle of the permutation into place, marking
   // visited slots by setting sort_ix[j] = j.
   for (i=0; i < di->loctab_used; i++) {
      DiLoc tmp_diloc;
      UInt  tmp_fndn_ix;

      if (i == sort_ix[i])
         continue; // i already at the good place

      /* Save slot i, then shift the rest of the cycle up by one. */
      tmp_diloc = di->loctab[i];
      tmp_fndn_ix = ML_(fndn_ix)(di, i);
      j = i;
      for (;;) {
         k = sort_ix[j];
         sort_ix[j] = j;
         if (k == i)
            break;
         di->loctab[j] = di->loctab[k];
         set_fndn_ix (di, j, ML_(fndn_ix)(di, k));
         j = k;
      }
      /* Close the cycle with the saved entry. */
      di->loctab[j] = tmp_diloc;
      set_fndn_ix (di, j, tmp_fndn_ix);
   }
   ML_(dinfo_free)(sort_ix);
}
/* Sort the location table by starting address.  Mash the table around
   so as to establish the property that addresses are in order and the
   ranges do not overlap.  This facilitates using binary search to map
   addresses to locations when we come to query the table.
*/
static void canonicaliseLoctab ( struct _DebugInfo* di )
{
   Word i, j;

   if (di->loctab_used == 0)
      return;

   /* sort loctab and loctab_fndn_ix by addr. */
   sort_loctab_and_loctab_fndn_ix (di);

   for (i = 0; i < ((Word)di->loctab_used)-1; i++) {
      /* If two adjacent entries overlap, truncate the first. */
      if (di->loctab[i].addr + di->loctab[i].size > di->loctab[i+1].addr) {
         /* Do this in signed int32 because the actual .size fields
            are only 12 bits. */
         Int new_size = di->loctab[i+1].addr - di->loctab[i].addr;
         TRACE_LOCTAB_CANON ("Truncating",
                             &(di->loctab[i]), &(di->loctab[i+1]));
         /* Clamp the new size into what the 12-bit field can hold;
            a negative gap (duplicate addr) becomes size 0 and is
            zapped below. */
         if (new_size < 0) {
            di->loctab[i].size = 0;
         } else
         if (new_size > MAX_LOC_SIZE) {
            di->loctab[i].size = MAX_LOC_SIZE;
         } else {
            di->loctab[i].size = (UShort)new_size;
         }
      }
   }

   /* Zap any zero-sized entries resulting from the truncation
      process. */
   j = 0;
   for (i = 0; i < (Word)di->loctab_used; i++) {
      if (di->loctab[i].size > 0) {
         if (j != i) {
            di->loctab[j] = di->loctab[i];
            set_fndn_ix(di, j, ML_(fndn_ix)(di, i));
         }
         j++;
      }
   }
   di->loctab_used = j;

   /* Ensure relevant postconditions hold. */
   for (i = 0; i < ((Word)di->loctab_used)-1; i++) {
      if (0)
         VG_(printf)("%ld 0x%p lno:%d sz:%d fndn_ix:%u  i+1 0x%p\n",
                     i,
                     (void*)di->loctab[i].addr,
                     di->loctab[i].lineno, 
                     di->loctab[i].size,
                     ML_(fndn_ix)(di, i),
                     (void*)di->loctab[i+1].addr);

      /* No zero-sized symbols. */
      vg_assert(di->loctab[i].size > 0);
      /* In order. */
      vg_assert(di->loctab[i].addr < di->loctab[i+1].addr);
      /* No overlaps. */
      vg_assert(di->loctab[i].addr + di->loctab[i].size - 1
                < di->loctab[i+1].addr);
   }

   /* Free up unused space at the end of the table. */
   shrinkLocTab(di);
}
2125 /* Sort the inlined call table by starting address. Mash the table around
2126 so as to establish the property that addresses are in order.
2127 This facilitates using binary search to map addresses to locations when
2128 we come to query the table.
2129 Note : ranges can overlap, multiple ranges can start at an address,
2130 multiple ranges can end at an address.
2132 static Int compare_DiInlLoc ( const void* va, const void* vb )
2134 const DiInlLoc* a = va;
2135 const DiInlLoc* b = vb;
2136 if (a->addr_lo < b->addr_lo) return -1;
2137 if (a->addr_lo > b->addr_lo) return 1;
2138 return 0;
2141 static void canonicaliseInltab ( struct _DebugInfo* di )
2143 Word i;
2145 if (di->inltab_used == 0)
2146 return;
2148 /* Sort by start address. */
2149 VG_(ssort)(di->inltab, di->inltab_used,
2150 sizeof(*di->inltab), compare_DiInlLoc);
2152 /* Ensure relevant postconditions hold. */
2153 for (i = 0; i < ((Word)di->inltab_used)-1; i++) {
2154 /* No zero-sized inlined call. */
2155 vg_assert(di->inltab[i].addr_lo < di->inltab[i].addr_hi);
2156 /* In order, but we can have duplicates and overlapping ranges. */
2157 vg_assert(di->inltab[i].addr_lo <= di->inltab[i+1].addr_lo);
2160 /* Free up unused space at the end of the table. */
2161 shrinkInlTab(di);
2165 /* Sort the call-frame-info cfsi_rd by starting address. Mash the table
2166 around so as to establish the property that addresses are in order
2167 and the ranges do not overlap. This facilitates using binary
2168 search to map addresses to locations when we come to query the
2169 table.
   Also, set cfsi_minavma and cfsi_maxavma to be the min and max of
   any of the address ranges contained in cfsi_rd[0 .. cfsi_used-1], so
   as to facilitate rapidly skipping this SegInfo when looking for an
   address which falls outside that range.
2176 static Int compare_DiCfSI ( const void* va, const void* vb )
2178 const DiCfSI* a = va;
2179 const DiCfSI* b = vb;
2180 if (a->base < b->base) return -1;
2181 if (a->base > b->base) return 1;
2182 return 0;
2185 static void get_cfsi_rd_stats ( const DebugInfo* di,
2186 UWord *n_mergeables, UWord *n_holes )
2188 Word i;
2190 *n_mergeables = 0;
2191 *n_holes = 0;
2193 vg_assert (di->cfsi_used == 0 || di->cfsi_rd);
2194 for (i = 1; i < (Word)di->cfsi_used; i++) {
2195 Addr here_min = di->cfsi_rd[i].base;
2196 Addr prev_max = di->cfsi_rd[i-1].base + di->cfsi_rd[i-1].len - 1;
2197 Addr sep = here_min - prev_max;
2198 if (sep > 1)
2199 (*n_holes)++;
2200 if (sep == 1 && di->cfsi_rd[i-1].cfsi_m_ix == di->cfsi_rd[i].cfsi_m_ix)
2201 (*n_mergeables)++;
/* Canonicalise cfsi_rd: compute the summary [cfsi_minavma, cfsi_maxavma]
   range, sort by base address, truncate overlaps, drop empty entries,
   and assert the resulting ordering/disjointness invariants. */
void ML_(canonicaliseCFI) ( struct _DebugInfo* di )
{
   Word i, j;
   const Addr minAvma = 0;
   const Addr maxAvma = ~minAvma;   /* all-ones: largest possible Addr */

   /* Note: take care in here.  di->cfsi can be NULL, in which
      case _used and _size fields will be zero. */
   if (di->cfsi_rd == NULL) {
      vg_assert(di->cfsi_used == 0);
      vg_assert(di->cfsi_size == 0);
      vg_assert(di->cfsi_m_pool == NULL);
   } else {
      vg_assert(di->cfsi_size != 0);
      vg_assert(di->cfsi_m_pool != NULL);
   }

   /* Set cfsi_minavma and cfsi_maxavma to summarise the entire
      address range contained in cfsi_rd[0 .. cfsi_used-1].  Start
      with an empty (inverted) range so any entry narrows it. */
   di->cfsi_minavma = maxAvma;
   di->cfsi_maxavma = minAvma;
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      Addr here_min = di->cfsi_rd[i].base;
      Addr here_max = di->cfsi_rd[i].base + di->cfsi_rd[i].len - 1;
      if (here_min < di->cfsi_minavma)
         di->cfsi_minavma = here_min;
      if (here_max > di->cfsi_maxavma)
         di->cfsi_maxavma = here_max;
   }

   if (di->trace_cfi)
      VG_(printf)("canonicaliseCfiSI: %lu entries, %#lx .. %#lx\n",
                  di->cfsi_used,
                  di->cfsi_minavma, di->cfsi_maxavma);

   /* Sort the cfsi_rd array by base address. */
   VG_(ssort)(di->cfsi_rd, di->cfsi_used, sizeof(*di->cfsi_rd), compare_DiCfSI);

   /* If two adjacent entries overlap, truncate the first. */
   for (i = 0; i < (Word)di->cfsi_used-1; i++) {
      if (di->cfsi_rd[i].base + di->cfsi_rd[i].len > di->cfsi_rd[i+1].base) {
         Word new_len = di->cfsi_rd[i+1].base - di->cfsi_rd[i].base;
         /* how could it be otherwise?  The entries are sorted by the
            .base field. */
         vg_assert(new_len >= 0);
         vg_assert(new_len <= di->cfsi_rd[i].len);
         di->cfsi_rd[i].len = new_len;
      }
   }

   /* Zap any zero-sized entries resulting from the truncation
      process. */
   j = 0;
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      if (di->cfsi_rd[i].len > 0) {
         if (j != i)
            di->cfsi_rd[j] = di->cfsi_rd[i];
         j++;
      }
   }
   /* VG_(printf)("XXXXXXXXXXXXX %d %d\n", di->cfsi_used, j); */
   di->cfsi_used = j;

   /* Ensure relevant postconditions hold. */
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      /* No zero-length ranges. */
      vg_assert(di->cfsi_rd[i].len > 0);
      /* Makes sense w.r.t. summary address range */
      vg_assert(di->cfsi_rd[i].base >= di->cfsi_minavma);
      vg_assert(di->cfsi_rd[i].base + di->cfsi_rd[i].len - 1
                <= di->cfsi_maxavma);

      if (i < di->cfsi_used - 1) {
         /* Disabled debug dump; NOTE(review): it refers to the old
            di->cfsi field rather than cfsi_rd — update if re-enabled.
         if (!(di->cfsi[i].base < di->cfsi[i+1].base)) {
            VG_(printf)("\nOOO cfsis:\n");
            ML_(ppCfiSI)(&di->cfsi[i]);
            ML_(ppCfiSI)(&di->cfsi[i+1]);
         }
         */
         /* In order. */
         vg_assert(di->cfsi_rd[i].base < di->cfsi_rd[i+1].base);
         /* No overlaps. */
         vg_assert(di->cfsi_rd[i].base + di->cfsi_rd[i].len - 1
                   < di->cfsi_rd[i+1].base);
      }
   }

   /* Optionally report merge/hole statistics for the table. */
   if (VG_(clo_stats) && VG_(clo_verbosity) >= 3) {
      UWord n_mergeables, n_holes;
      get_cfsi_rd_stats (di, &n_mergeables, &n_holes);
      VG_(dmsg)("CFSI total %lu mergeables %lu holes %lu uniq cfsi_m %u\n",
                di->cfsi_used, n_mergeables, n_holes,
                di->cfsi_m_pool ? VG_(sizeDedupPA) (di->cfsi_m_pool) : 0);
   }
}
/* Build the compact cfsi_base[]/cfsi_m_ix[] arrays from the "reading"
   array cfsi_rd[]:
     - an entry contiguous with its predecessor and sharing the same
       cfsi_m pool index is merged away (the predecessor covers it);
     - gaps are materialised as explicit "hole" entries with pool
       index 0, so the result covers a contiguous address range and a
       lookup needs only the start addresses (ML_(search_one_cfitab)). */
void ML_(finish_CFSI_arrays) ( struct _DebugInfo* di )
{
   UWord n_mergeables, n_holes;
   UWord new_used;              /* entry count after merging + holes */
   UWord i;
   UWord pos;                   /* write position in the new arrays */
   UWord f_mergeables, f_holes; /* found counts, checked vs predicted */
   UInt sz_cfsi_m_pool;

   /* Predict how many entries will merge away / be added as holes. */
   get_cfsi_rd_stats (di, &n_mergeables, &n_holes);

   if (di->cfsi_used == 0) {
      /* Nothing was read; all related state must be empty too. */
      vg_assert (di->cfsi_rd == NULL);
      vg_assert (di->cfsi_m_pool == NULL);
      vg_assert (n_mergeables == 0);
      vg_assert (n_holes == 0);
      return;
   }

   vg_assert (di->cfsi_used > n_mergeables);
   new_used = di->cfsi_used - n_mergeables + n_holes;

   /* Pick the narrowest index width able to address every distinct
      cfsi_m in the dedup pool (index 0 is reserved for holes). */
   sz_cfsi_m_pool = VG_(sizeDedupPA)(di->cfsi_m_pool);
   vg_assert (sz_cfsi_m_pool > 0);
   if (sz_cfsi_m_pool <= 255)
      di->sizeof_cfsi_m_ix = 1;
   else if (sz_cfsi_m_pool <= 65535)
      di->sizeof_cfsi_m_ix = 2;
   else
      di->sizeof_cfsi_m_ix = 4;

   di->cfsi_base = ML_(dinfo_zalloc)( "di.storage.finCfSI.1",
                                      new_used * sizeof(Addr) );
   di->cfsi_m_ix = ML_(dinfo_zalloc)( "di.storage.finCfSI.2",
                                      new_used * sizeof(UChar)*di->sizeof_cfsi_m_ix);

   pos = 0;
   f_mergeables = 0;
   f_holes = 0;
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      if (i > 0) {
         Addr here_min = di->cfsi_rd[i].base;
         Addr prev_max = di->cfsi_rd[i-1].base + di->cfsi_rd[i-1].len - 1;
         SizeT sep = here_min - prev_max;

         // Skip a mergeable entry: contiguous with its predecessor and
         // describing the same cfsi_m, so the predecessor's slot
         // already covers it.
         if (sep == 1) {
            if (di->cfsi_rd[i-1].cfsi_m_ix == di->cfsi_rd[i].cfsi_m_ix) {
               f_mergeables++;
               continue;
            }
         }
         // Insert a hole if needed; pool index 0 marks "no CFI here".
         if (sep > 1) {
            f_holes++;
            di->cfsi_base[pos] = prev_max + 1;
            switch (di->sizeof_cfsi_m_ix) {
               case 1: ((UChar*) di->cfsi_m_ix)[pos] = 0; break;
               case 2: ((UShort*)di->cfsi_m_ix)[pos] = 0; break;
               case 4: ((UInt*)  di->cfsi_m_ix)[pos] = 0; break;
               default: vg_assert(0);
            }
            pos++;
         }
      }

      // Insert the cfsi entry i.
      di->cfsi_base[pos] = di->cfsi_rd[i].base;
      switch (di->sizeof_cfsi_m_ix) {
         case 1: ((UChar*) di->cfsi_m_ix)[pos] = di->cfsi_rd[i].cfsi_m_ix; break;
         case 2: ((UShort*)di->cfsi_m_ix)[pos] = di->cfsi_rd[i].cfsi_m_ix; break;
         case 4: ((UInt*)  di->cfsi_m_ix)[pos] = di->cfsi_rd[i].cfsi_m_ix; break;
         default: vg_assert(0);
      }
      pos++;
   }

   /* Predicted and found counts must agree exactly. */
   vg_assert (f_mergeables == n_mergeables);
   vg_assert (f_holes == n_holes);
   vg_assert (pos == new_used);

   /* NOTE(review): for a deferred load, cfsi_rd and the old
      cfsi_used/cfsi_size are kept — presumably so a later full read
      can continue; confirm against the deferred-reading logic. */
   if (di->deferred)
      return;

   di->cfsi_used = new_used;
   di->cfsi_size = new_used;
   ML_(dinfo_free) (di->cfsi_rd);
   di->cfsi_rd = NULL;
}
2393 /* Canonicalise the tables held by 'di', in preparation for use. Call
2394 this after finishing adding entries to these tables. */
2395 void ML_(canonicaliseTables) ( struct _DebugInfo* di )
2397 canonicaliseSymtab ( di );
2398 canonicaliseLoctab ( di );
2399 canonicaliseInltab ( di );
2400 ML_(canonicaliseCFI) ( di );
2401 canonicaliseVarInfo ( di );
2403 if (di->deferred)
2404 return;
2406 if (di->cfsi_m_pool)
2407 VG_(freezeDedupPA) (di->cfsi_m_pool, ML_(dinfo_shrink_block));
2408 if (di->strpool)
2409 VG_(freezeDedupPA) (di->strpool, ML_(dinfo_shrink_block));
2410 if (di->fndnpool)
2411 VG_(freezeDedupPA) (di->fndnpool, ML_(dinfo_shrink_block));
2415 /*------------------------------------------------------------*/
2416 /*--- Searching the tables ---*/
2417 /*------------------------------------------------------------*/
2419 /* Find a symbol-table index containing the specified pointer, or -1
2420 if not found. Binary search. */
2422 Word ML_(search_one_symtab) ( DebugInfo* di, Addr ptr,
2423 Bool findText )
2425 VG_(di_load_di)(di);
2426 Addr a_mid_lo, a_mid_hi;
2427 Word mid,
2428 lo = 0,
2429 hi = di->symtab_used-1;
2430 while (True) {
2431 /* current unsearched space is from lo to hi, inclusive. */
2432 if (lo > hi) return -1; /* not found */
2433 mid = (lo + hi) / 2;
2434 a_mid_lo = di->symtab[mid].avmas.main;
2435 a_mid_hi = ((Addr)di->symtab[mid].avmas.main) + di->symtab[mid].size - 1;
2437 if (ptr < a_mid_lo) { hi = mid-1; continue; }
2438 if (ptr > a_mid_hi) { lo = mid+1; continue; }
2439 vg_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
2440 /* Found a symbol with the correct address range. But is it
2441 of the right kind (text vs data) ? */
2442 if ( findText && di->symtab[mid].isText ) return mid;
2443 if ( (!findText) && (!di->symtab[mid].isText) ) return mid;
2444 return -1;
2449 /* Find a location-table index containing the specified pointer, or -1
2450 if not found. Binary search. */
2452 Word ML_(search_one_loctab) ( DebugInfo* di, Addr ptr )
2454 VG_(di_load_di)(di);
2455 Addr a_mid_lo, a_mid_hi;
2456 Word mid,
2457 lo = 0,
2458 hi = di->loctab_used-1;
2459 while (True) {
2460 /* current unsearched space is from lo to hi, inclusive. */
2461 if (lo > hi) return -1; /* not found */
2462 mid = (lo + hi) / 2;
2463 a_mid_lo = di->loctab[mid].addr;
2464 a_mid_hi = ((Addr)di->loctab[mid].addr) + di->loctab[mid].size - 1;
2466 if (ptr < a_mid_lo) { hi = mid-1; continue; }
2467 if (ptr > a_mid_hi) { lo = mid+1; continue; }
2468 vg_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
2469 return mid;
/* Find a CFI-table index for the specified pointer.  Binary search.

   cfsi_base[] holds only range start addresses (ML_(finish_CFSI_arrays)
   made the entries cover a contiguous span by inserting explicit hole
   entries), so this locates the last entry whose base is <= ptr and
   returns its index, i.e. lo-1.  That can be -1 when ptr precedes the
   first base.  NOTE(review): the returned entry may be a hole
   (cfsi_m_ix 0) or ptr may lie past the covered span — presumably the
   caller checks that; confirm at the call sites. */
Word ML_(search_one_cfitab) ( DebugInfo* di, Addr ptr )
{
   VG_(di_load_di)(di);   /* ensure the table is actually loaded */
   Word mid,
        lo = 0,
        hi = di->cfsi_used-1;

   while (lo <= hi) {
      /* Invariants : hi == cfsi_used-1 || ptr < cfsi_base[hi+1]
                      lo == 0           || ptr > cfsi_base[lo-1]
         (the first part of the invariants is similar to considering
         that cfsi_base[-1] is 0 and cfsi_base[cfsi_used] is ~0) */
      mid = (lo + hi) / 2;
      if (ptr < di->cfsi_base[mid]) { hi = mid-1; continue; }
      if (ptr > di->cfsi_base[mid]) { lo = mid+1; continue; }
      /* Exact hit on a base address: answer is this entry. */
      lo = mid+1; break;
   }

#if 0
   /* Cross-check against a linear scan (disabled). */
   for (mid = 0; mid <= di->cfsi_used-1; mid++)
      if (ptr < di->cfsi_base[mid])
         break;
   vg_assert (lo - 1 == mid - 1);
#endif
   return lo - 1;
}
2505 /* Find a FPO-table index containing the specified pointer, or -1
2506 if not found. Binary search. */
2508 Word ML_(search_one_fpotab) ( const DebugInfo* di, Addr ptr )
2510 Addr const addr = ptr - di->fpo_base_avma;
2511 Addr a_mid_lo, a_mid_hi;
2512 Word mid, size,
2513 lo = 0,
2514 hi = di->fpo_size-1;
2515 while (True) {
2516 /* current unsearched space is from lo to hi, inclusive. */
2517 if (lo > hi) return -1; /* not found */
2518 mid = (lo + hi) / 2;
2519 a_mid_lo = di->fpo[mid].ulOffStart;
2520 size = di->fpo[mid].cbProcSize;
2521 a_mid_hi = a_mid_lo + size - 1;
2522 vg_assert(a_mid_hi >= a_mid_lo);
2523 if (addr < a_mid_lo) { hi = mid-1; continue; }
2524 if (addr > a_mid_hi) { lo = mid+1; continue; }
2525 vg_assert(addr >= a_mid_lo && addr <= a_mid_hi);
2526 return mid;
2530 /*--------------------------------------------------------------------*/
2531 /*--- end ---*/
2532 /*--------------------------------------------------------------------*/