1 /* Copyright (C) 2021-2023 Free Software Foundation, Inc.
4 This file is part of GNU Binutils.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
23 * incorporating former "loadobjects" into more general "map"
24 * (including code and data segments and dynamic functions)
34 #include <sys/param.h>
38 #include "collector.h"
39 #include "gp-experiment.h"
43 * These are obsolete and unreliable.
44 * They are included here only for historical compatibility.
/* NOTE(review): Solaris /proc prmap-style attribute bits, kept for
   historical compatibility.  They are OR-ed into the same pr_mflags
   word as PROT_READ/PROT_WRITE/PROT_EXEC by the /proc/self/maps
   parser, so the values are chosen not to collide with those bits.  */
46 #define MA_SHARED 0x08 /* changes are shared by mapped object */
47 #define MA_ANON 0x40 /* anonymous memory (e.g. /dev/zero) */
48 #define MA_ISM 0x80 /* intimate shared mem (shared MMU resources) */
49 #define MA_BREAK 0x10 /* grown by brk(2) */
50 #define MA_STACK 0x20 /* grown automatically on stack faults */
52 typedef struct prmap_t
54 unsigned long pr_vaddr
; /* virtual address of mapping */
55 unsigned long pr_size
; /* size of mapping in bytes */
56 char *pr_mapname
; /* name in /proc/<pid>/object */
57 int pr_mflags
; /* protection and attribute flags (see below) */
58 unsigned long pr_offset
; /* offset into mapped object, if any */
61 int pr_pagesize
; /* pagesize (bytes) for this mapping */
64 typedef struct MapInfo
69 char *mapname
; /* name in /proc/<pid>/object */
76 typedef struct NameInfo
78 struct NameInfo
*next
;
80 char filename
[1]; /* dynamic length file name */
83 static NameInfo
*namemaps
= NULL
;
84 static MapInfo mmaps
; /* current memory maps */
85 static struct DataHandle
*map_hndl
= NULL
;
86 static char dyntext_fname
[MAXPATHLEN
];
87 static void *mapcache
= NULL
;
88 static char *maptext
= NULL
;
89 static size_t maptext_sz
= 4096; /* initial buffer size */
90 static int mmap_mode
= 0;
91 static int mmap_initted
= 0;
92 static collector_mutex_t map_lock
= COLLECTOR_MUTEX_INITIALIZER
;
93 static collector_mutex_t dyntext_lock
= COLLECTOR_MUTEX_INITIALIZER
;
95 /* a reentrance guard for the interposition functions ensures that updates to
96 the map cache/file are sequential, with the first doing the final update */
97 static int reentrance
= 0;
/* Reentrance-guard macros over the `reentrance' counter above:
   CHCK_REENTRANCE is true when an update is already in progress or
   mmap tracing is disabled (mmap_mode <= 0); PUSH/POP bracket a
   guarded section so nested interposition calls are skipped.  */
98 #define CHCK_REENTRANCE (reentrance || mmap_mode <= 0)
99 #define CURR_REENTRANCE reentrance
100 #define PUSH_REENTRANCE reentrance++
101 #define POP_REENTRANCE reentrance--
103 /* interposition function handles */
104 static void *(*__real_mmap
)(void* start
, size_t length
, int prot
, int flags
,
105 int fd
, off_t offset
) = NULL
;
106 static void *(*__real_mmap64
)(void* start
, size_t length
, int prot
, int flags
,
107 int fd
, off64_t offset
) = NULL
;
108 static int (*__real_munmap
)(void* start
, size_t length
) = NULL
;
109 static void *(*__real_dlopen
)(const char* pathname
, int mode
) = NULL
;
110 static void *(*__real_dlopen_2_34
)(const char* pathname
, int mode
) = NULL
;
111 static void *(*__real_dlopen_2_17
)(const char* pathname
, int mode
) = NULL
;
112 static void *(*__real_dlopen_2_2_5
)(const char* pathname
, int mode
) = NULL
;
113 static void *(*__real_dlopen_2_1
)(const char* pathname
, int mode
) = NULL
;
114 static void *(*__real_dlopen_2_0
)(const char* pathname
, int mode
) = NULL
;
116 static int (*__real_dlclose
)(void* handle
) = NULL
;
117 static int (*__real_dlclose_2_34
)(void* handle
) = NULL
;
118 static int (*__real_dlclose_2_17
)(void* handle
) = NULL
;
119 static int (*__real_dlclose_2_2_5
)(void* handle
) = NULL
;
120 static int (*__real_dlclose_2_0
)(void* handle
) = NULL
;
121 static void (*collector_heap_record
)(int, size_t, void*) = NULL
;
123 /* internal function prototypes */
124 static int init_mmap_intf ();
125 static int init_mmap_files ();
126 static void append_segment_record (char *format
, ...);
127 static void update_map_segments (hrtime_t hrt
, int resolve
);
128 static void resolve_mapname (MapInfo
*map
, char *name
);
129 static void record_segment_map (hrtime_t timestamp
, uint64_t loadaddr
,
130 unsigned long msize
, int pagesize
, int modeflags
,
131 long long offset
, unsigned check
, char *name
);
132 static void record_segment_unmap (hrtime_t timestamp
, uint64_t loadaddr
);
134 /* Linux needs handling of the vsyscall page to get its data into the map.xml file */
135 static void process_vsyscall_page ();
137 #define MAXVSYSFUNCS 10
/* Cache of vsyscall/vdso functions found by process_vsyscall_page():
   parallel arrays (name / vaddr / size), nvsysfuncs entries, bounded
   by MAXVSYSFUNCS, so later passes can re-emit records without
   re-parsing the page.  */
138 static int nvsysfuncs
= 0;
139 static char *sysfuncname
[MAXVSYSFUNCS
];
140 static uint64_t sysfuncvaddr
[MAXVSYSFUNCS
];
141 static unsigned long sysfuncsize
[MAXVSYSFUNCS
];
/* Parallel arrays caching "dynfunc" map records (map name, address,
   size, function name), bounded by MAXDYN.  NOTE(review): MAXDYN and
   its counter are defined on lines not visible in this chunk.  */
145 static char *dynname
[MAXDYN
];
146 static void *dynvaddr
[MAXDYN
];
147 static unsigned dynsize
[MAXDYN
];
148 static char *dynfuncname
[MAXDYN
];
150 /*===================================================================*/
153 * void __collector_mmap_init_mutex_locks()
154 * Iinitialize mmap mutex locks.
/* Reinitialize (unlock) the two module mutexes, map_lock and
   dyntext_lock; called at install time and from the fork-child
   cleanup path, where a lock may have been held across fork().  */
157 __collector_mmap_init_mutex_locks ()
159 __collector_mutex_init (&map_lock
);
160 __collector_mutex_init (&dyntext_lock
);
163 /* __collector_ext_update_map_segments called by the audit agent
164 * Is is also called by dbx/collector when a (possible) map update
165 * is intimated, such as after dlopen/dlclose.
166 * Required when libcollector.so is not preloaded and interpositions inactive.
169 __collector_ext_update_map_segments (void)
173 TprintfT (0, "__collector_ext_update_map_segments(%d)\n", CURR_REENTRANCE
);
177 update_map_segments (GETRELTIME (), 1);
182 * int __collector_ext_mmap_install()
183 * Install and initialise mmap tracing.
186 __collector_ext_mmap_install (int record
)
188 TprintfT (0, "__collector_ext_mmap_install(mmap_mode=%d)\n", mmap_mode
);
191 if (init_mmap_intf ())
193 TprintfT (0, "ERROR: collector mmap tracing initialization failed.\n");
194 return COL_ERROR_EXPOPEN
;
198 TprintfT (DBG_LT2
, "collector mmap tracing: mmap pointer not null\n");
200 /* Initialize side door interface with the heap tracing module */
201 collector_heap_record
= (void(*)(int, size_t, void*))dlsym (RTLD_DEFAULT
, "__collector_heap_record");
204 map_hndl
= __collector_create_handle (SP_MAP_FILE
);
205 if (map_hndl
== NULL
)
206 return COL_ERROR_MAPOPEN
;
207 if (init_mmap_files ())
209 TprintfT (0, "ERROR: collector init_mmap_files() failed.\n");
210 return COL_ERROR_EXPOPEN
;
216 update_map_segments (GETRELTIME (), 1); // initial map
220 process_vsyscall_page ();
221 return COL_ERROR_NONE
;
225 * int __collector_ext_mmap_deinstall()
226 * Optionally update final map and stop tracing mmap events.
229 __collector_ext_mmap_deinstall (int update
)
232 return COL_ERROR_NONE
;
238 update_map_segments (GETRELTIME (), 1);
241 TprintfT (0, "__collector_ext_mmap_deinstall(%d)\n", update
);
242 if (map_hndl
!= NULL
)
244 __collector_delete_handle (map_hndl
);
247 __collector_mutex_lock (&map_lock
); // get lock before resetting
249 /* Free all memory maps */
251 for (mp
= mmaps
.next
; mp
;)
253 MapInfo
*next
= mp
->next
;
254 __collector_freeCSize (__collector_heap
, mp
, sizeof (*mp
));
259 /* Free all name maps */
261 for (np
= namemaps
; np
;)
263 NameInfo
*next
= np
->next
;
264 __collector_freeCSize (__collector_heap
, np
, sizeof (*np
) + __collector_strlen (np
->filename
));
268 mapcache
= __collector_reallocVSize (__collector_heap
, mapcache
, 0);
271 __collector_mutex_unlock (&map_lock
);
272 TprintfT (0, "__collector_ext_mmap_deinstall done\n");
277 * void __collector_mmap_fork_child_cleanup()
278 * Perform all necessary cleanup steps in child process after fork().
281 __collector_mmap_fork_child_cleanup ()
283 /* Initialize all mmap "mutex" locks */
284 __collector_mmap_init_mutex_locks ();
288 __collector_delete_handle (map_hndl
);
289 __collector_mutex_lock (&map_lock
); // get lock before resetting
291 /* Free all memory maps */
293 for (mp
= mmaps
.next
; mp
;)
295 MapInfo
*next
= mp
->next
;
296 __collector_freeCSize (__collector_heap
, mp
, sizeof (*mp
));
301 /* Free all name maps */
303 for (np
= namemaps
; np
;)
305 NameInfo
*next
= np
->next
;
306 __collector_freeCSize (__collector_heap
, np
, sizeof (*np
) + __collector_strlen (np
->filename
));
310 mapcache
= __collector_reallocVSize (__collector_heap
, mapcache
, 0);
313 __collector_mutex_unlock (&map_lock
);
319 TprintfT (DBG_LT2
, "init_mmap_files\n");
320 /* also create the headerless dyntext file (if required) */
321 CALL_UTIL (snprintf
)(dyntext_fname
, sizeof (dyntext_fname
), "%s/%s",
322 __collector_exp_dir_name
, SP_DYNTEXT_FILE
);
323 if (CALL_UTIL (access
)(dyntext_fname
, F_OK
) != 0)
325 int fd
= CALL_UTIL (open
)(dyntext_fname
, O_RDWR
| O_CREAT
| O_TRUNC
,
326 S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IROTH
);
330 TprintfT (0, "ERROR: init_mmap_files: open(%s) failed\n",
332 __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s: %s</event>\n",
333 SP_JCMD_CERROR
, COL_ERROR_DYNOPEN
, errno
,
334 dyntext_fname
, errmsg
);
335 return COL_ERROR_DYNOPEN
;
338 CALL_UTIL (close
)(fd
);
340 return COL_ERROR_NONE
;
/* Format a single XML record (printf-style varargs) and write it to
   the map.xml handle map_hndl.  First formats into a fixed stack
   buffer; if __collector_xml_vsnprintf reports truncation, the record
   is reformatted into an alloca'd buffer of the exact required size
   (note the second va_start for the second pass).  Records produced
   while the experiment is neither open nor paused are logged and not
   written; a failed write is reported via __collector_log_write with
   COL_ERROR_MAPWRITE.  */
344 append_segment_record (char *format
, ...)
349 va_start (va
, format
);
350 int sz
= __collector_xml_vsnprintf (bufptr
, sizeof (buf
), format
, va
);
353 if (__collector_expstate
!= EXP_OPEN
&& __collector_expstate
!= EXP_PAUSED
)
355 TprintfT (0, "append_segment_record: expt neither open nor paused (%d); "
356 "not writing to map.xml\n\t%s", __collector_expstate
, buf
);
359 if (sz
>= sizeof (buf
))
361 /* Allocate a new buffer */
362 sz
+= 1; /* add the terminating null byte */
363 bufptr
= (char*) alloca (sz
);
364 va_start (va
, format
);
365 sz
= __collector_xml_vsnprintf (bufptr
, sz
, format
, va
);
368 int rc
= __collector_write_string (map_hndl
, bufptr
, sz
);
370 (void) __collector_log_write ("<event kind=\"%s\" id=\"%d\"></event>\n",
371 SP_JCMD_CERROR
, COL_ERROR_MAPWRITE
);
/* Emit one <event kind="map" object="segment" .../> record to map.xml.
   The hrtime timestamp is split into whole seconds and nanoseconds
   (via NANOSEC); a negative file offset is rendered as an explicit
   '-' sign followed by its magnitude so the %llX field stays
   unsigned.  */
375 record_segment_map (hrtime_t timestamp
, uint64_t loadaddr
, unsigned long msize
,
376 int pagesize
, int modeflags
, long long offset
,
377 unsigned check
, char *name
)
380 TprintfT (DBG_LT2
, "record_segment_map(%s @ 0x%llx)\n", name
, (long long) loadaddr
);
381 append_segment_record ("<event kind=\"map\" object=\"segment\" tstamp=\"%u.%09u\" "
382 "vaddr=\"0x%016llX\" size=\"%lu\" pagesz=\"%d\" foffset=\"%c0x%08llX\" "
383 "modes=\"0x%03X\" chksum=\"0x%0X\" name=\"%s\"/>\n",
384 (unsigned) (timestamp
/ NANOSEC
),
385 (unsigned) (timestamp
% NANOSEC
),
386 loadaddr
, msize
, pagesize
,
387 offset
< 0 ? '-' : '+', offset
< 0 ? -offset
: offset
,
388 modeflags
, check
, name
);
/* Emit one <event kind="unmap" .../> record to map.xml for a segment
   that has disappeared from the address space, with the same
   seconds.nanoseconds timestamp split as record_segment_map().  */
392 record_segment_unmap (hrtime_t timestamp
, uint64_t loadaddr
)
394 TprintfT (DBG_LT2
, "record_segment_unmap(@ 0x%llx)\n", (long long) loadaddr
);
395 append_segment_record ("<event kind=\"unmap\" tstamp=\"%u.%09u\" vaddr=\"0x%016llX\"/>\n",
396 (unsigned) (timestamp
/ NANOSEC
),
397 (unsigned) (timestamp
% NANOSEC
), loadaddr
);
/* Word-size-dependent ELF type aliases so the vsyscall/vdso parser is
   written once against ELF_EHDR/ELF_SHDR/ELF_SYM etc.  NOTE(review):
   the #if/#else/#endif selecting between the Elf64_* and Elf32_* sets
   is on lines not visible in this chunk -- without it these would be
   conflicting redefinitions; confirm against the full file.  */
401 #define ELF_EHDR Elf64_Ehdr
402 #define ELF_PHDR Elf64_Phdr
403 #define ELF_SHDR Elf64_Shdr
404 #define ELF_DYN Elf64_Dyn
405 #define ELF_AUX Elf64_auxv_t
406 #define ELF_SYM Elf64_Sym
407 #define ELF_ST_BIND ELF64_ST_BIND
408 #define ELF_ST_TYPE ELF64_ST_TYPE
/* 32-bit variants (selected by the non-visible preprocessor branch).  */
410 #define ELF_EHDR Elf32_Ehdr
411 #define ELF_PHDR Elf32_Phdr
412 #define ELF_SHDR Elf32_Shdr
413 #define ELF_DYN Elf32_Dyn
414 #define ELF_AUX Elf32_auxv_t
415 #define ELF_SYM Elf32_Sym
416 #define ELF_ST_BIND ELF32_ST_BIND
417 #define ELF_ST_TYPE ELF32_ST_TYPE
/* Compute a checksum identifying a mapped object.  Only code segments
   are checksummed: a map that is neither readable nor executable, or
   that is writable, gets the sentinel (unsigned)-1.  NOTE(review):
   the actual checksum computation for qualifying segments is on lines
   not visible in this fragment.  */
421 checksum_mapname (MapInfo
* map
)
423 unsigned checksum
= 0;
424 /* only checksum code segments */
425 if ((map
->mflags
& (PROT_EXEC
| PROT_READ
)) == 0 ||
426 (map
->mflags
& PROT_WRITE
) != 0)
428 checksum
= (unsigned) - 1;
429 TprintfT (DBG_LT2
, "checksum_mapname checksum = 0x%0X\n", checksum
);
435 dlopen_searchpath (void*(real_dlopen
) (const char *, int),
436 void *caller_addr
, const char *basename
, int mode
)
438 TprintfT (DBG_LT2
, "dlopen_searchpath(%p, %s, %d)\n", caller_addr
, basename
, mode
);
440 if (dladdr (caller_addr
, &dl_info
) == 0)
442 TprintfT (0, "ERROR: dladdr(%p): %s\n", caller_addr
, dlerror ());
445 TprintfT (DBG_LT2
, "dladdr(%p): %p fname=%s\n",
446 caller_addr
, dl_info
.dli_fbase
, dl_info
.dli_fname
);
447 int noload
= RTLD_LAZY
| RTLD_NOW
| RTLD_NOLOAD
;
448 void *caller_hndl
= NULL
;
449 #define WORKAROUND_RTLD_BUG 1
450 #ifdef WORKAROUND_RTLD_BUG
451 // A dynamic linker dlopen bug can result in corruption/closure of open streams
452 // XXXX workaround should be removed once linker patches are all available
454 #define MAINBASE 0x400000
456 #define MAINBASE 0x08048000
458 const char* tmp_path
=
459 (dl_info
.dli_fbase
== (void*) MAINBASE
) ? NULL
: dl_info
.dli_fname
;
460 caller_hndl
= real_dlopen (tmp_path
, noload
);
462 #else //XXXX workaround should be removed once linker patches are all available
464 caller_hndl
= real_dlopen (dl_info
.dli_fname
, noload
);
466 #endif //XXXX workaround should be removed once linker patches are all available
470 TprintfT (0, "ERROR: dlopen(%s,NOLOAD): %s\n", dl_info
.dli_fname
, dlerror ());
473 #if !defined(__MUSL_LIBC)
474 Dl_serinfo _info
, *info
= &_info
;
477 /* determine search path count and required buffer size */
478 dlinfo (caller_hndl
, RTLD_DI_SERINFOSIZE
, (void *) info
);
480 /* allocate new buffer and initialize */
483 There is a bug in Linux that causes the first call
484 to dlinfo() to return a small value for the dls_size.
486 The first call to dlinfo() determines the search path
487 count and the required buffer size. The second call to
488 dlinfo() tries to obtain the search path information.
490 However, the size of the buffer that is returned by
491 the first call to the dlinfo() is incorrect (too small).
492 The second call to dlinfo() uses the incorrect size to
493 allocate memory on the stack and internally uses the memcpy()
494 function to copy the search paths to the allocated memory space.
495 The length of the search path is much larger than the buffer
496 that is allocated on the stack. The memcpy() overwrites some
497 of the information that are saved on the stack, specifically,
498 it overwrites the "basename" parameter.
500 collect crashes right after the second call to dlinfo().
502 The search paths are used to locate the shared libraries.
503 dlinfo() creates the search paths based on the paths
504 that are assigned to LD_LIBRARY_PATH environment variable
505 and the standard library paths. The standard library paths
506 consists of the /lib and the /usr/lib paths. The
507 standard library paths are always included to the search
508 paths by dlinfo() even if the LD_LIBRARY_PATH environment
509 variable is not defined. Therefore, at the very least the
510 dls_cnt is assigned to 2 (/lib and /usr/lib) and dlinfo()
511 will never assign dls_cnt to zero. The dls_cnt is the count
512 of the potential paths for searching the shared libraries.
514 So we need to increase the buffer size before the second
515 call to dlinfo(). There are number of ways to increase
516 the buffer size. However, none of them can calculate the
517 buffer size precisely. Some users on the web have suggested
518 to multiply the MAXPATHLEN by dls_cnt for the buffer size.
519 The MAXPATHLEN is assigned to 1024 bytes. In my opinion
520 this is too much. So I have decided to multiply dls_size
521 by dls_cnt for the buffer size since the dls_size is much
522 smaller than 1024 bytes.
524 I have already confirmed with our user that the workaround
525 is working with his real application. Additionally,
526 the dlopen_searchpath() function is called only by the
527 libcollector init() function when the experiment is started.
528 Therefore, allocating some extra bytes on the stack which
529 is local to this routine is harmless.
532 info
= alloca (_info
.dls_size
* _info
.dls_cnt
);
533 info
->dls_size
= _info
.dls_size
;
534 info
->dls_cnt
= _info
.dls_cnt
;
536 /* obtain search path information */
537 dlinfo (caller_hndl
, RTLD_DI_SERINFO
, (void *) info
);
538 path
= &info
->dls_serpath
[0];
540 char pathname
[MAXPATHLEN
];
541 for (unsigned int cnt
= 1; cnt
<= info
->dls_cnt
; cnt
++, path
++)
543 __collector_strlcpy (pathname
, path
->dls_name
, sizeof (pathname
));
544 __collector_strlcat (pathname
, "/", sizeof (pathname
));
545 __collector_strlcat (pathname
, basename
, sizeof (pathname
));
547 #if (ARCH(Intel) && WSIZE(32)) || ARCH(SPARC)
548 ret
= (real_dlopen
) (pathname
, mode
);
550 ret
= CALL_REAL (dlopen
)(pathname
, mode
);
552 TprintfT (DBG_LT2
, "try %d/%d: %s = %p\n", cnt
, info
->dls_cnt
, pathname
, ret
);
554 return ret
; // success!
561 resolve_mapname (MapInfo
*map
, char *name
)
565 if (name
== NULL
|| *name
== '\0')
567 if (map
->mflags
& MA_STACK
)
568 map
->filename
= "<" SP_MAP_STACK
">";
569 else if (map
->mflags
& MA_BREAK
)
570 map
->filename
= "<" SP_MAP_HEAP
">";
571 else if (map
->mflags
& MA_ISM
)
572 map
->filename
= "<" SP_MAP_SHMEM
">";
576 for (np
= namemaps
; np
; np
= np
->next
)
577 if (__collector_strcmp (np
->mapname
, name
) == 0)
584 /* Create and link a new name map */
585 size_t fnamelen
= __collector_strlen (fname
) + 1;
586 np
= (NameInfo
*) __collector_allocCSize (__collector_heap
, sizeof (NameInfo
) + fnamelen
, 1);
587 if (np
== NULL
) // We could not get memory
589 np
->mapname
= np
->filename
;
590 __collector_strlcpy (np
->filename
, fname
, fnamelen
);
594 map
->mapname
= np
->mapname
;
595 map
->filename
= np
->filename
;
596 if (map
->filename
[0] == (char) 0)
597 map
->filename
= map
->mapname
;
598 TprintfT (DBG_LT2
, "resolve_mapname: %s resolved to %s\n", map
->mapname
, map
->filename
);
/* Accumulate digits from the string cursor *ss into an unsigned long,
   advancing the cursor; used by the /proc/self/maps line parser.
   Accepts 0-9/a-f/A-F, so it presumably parses the hexadecimal fields
   of a maps line (base appears to be 16) -- the loop header, `base'
   setup, and terminator handling are on lines not visible here;
   confirm against the full file.  */
602 str2ulong (char **ss
)
605 unsigned long val
= 0UL;
610 if (c
>= '0' && c
<= '9')
611 val
= val
* base
+ (c
- '0');
612 else if (c
>= 'a' && c
<= 'f')
613 val
= val
* base
+ (c
- 'a') + 10;
614 else if (c
>= 'A' && c
<= 'F')
615 val
= val
* base
+ (c
- 'A') + 10;
624 update_map_segments (hrtime_t hrt
, int resolve
)
627 if (__collector_mutex_trylock (&map_lock
))
629 TprintfT (0, "WARNING: update_map_segments(resolve=%d) BUSY\n", resolve
);
632 TprintfT (DBG_LT2
, "\n");
633 TprintfT (DBG_LT2
, "begin update_map_segments(hrt, %d)\n", resolve
);
635 // Note: there is similar code to read /proc/$PID/map[s] in
636 // perfan/er_kernel/src/KSubExp.cc KSubExp::write_subexpt_map()
637 const char* proc_map
= "/proc/self/maps";
638 size_t bufsz
= maptext_sz
;
641 int map_fd
= CALL_UTIL (open
)(proc_map
, O_RDONLY
);
645 maptext
= __collector_reallocVSize (__collector_heap
, maptext
, bufsz
);
646 TprintfT (DBG_LT2
, " update_map_segments: Loop for bufsize=%ld\n",
650 int n
= CALL_UTIL (read
)(map_fd
, maptext
+ filesz
, bufsz
- filesz
);
651 TprintfT (DBG_LT2
, " update_map_segments: __collector_read(bufp=%p nbyte=%ld)=%d\n",
652 maptext
+ filesz
, (long) ( bufsz
- filesz
), n
);
655 TprintfT (0, "ERROR: update_map_segments: read(maps): errno=%d\n", errno
);
656 (void) __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s</event>\n",
657 SP_JCMD_CERROR
, COL_ERROR_MAPREAD
, errno
, proc_map
);
658 CALL_UTIL (close
)(map_fd
);
659 __collector_mutex_unlock (&map_lock
);
668 if (filesz
>= bufsz
) /* Buffer too small */
672 CALL_UTIL (close
)(map_fd
);
675 int mapcache_entries
= 0;
677 for (str
= maptext
;; str
= str1
)
679 for (str1
= str
; str1
- maptext
< filesz
; str1
++)
687 if (str1
- maptext
>= filesz
)
691 mapcache
= __collector_reallocVSize (__collector_heap
, mapcache
,
692 sizeof (prmap_t
) * mapcache_entries
);
693 prmap_t
*map
= ((prmap_t
*) mapcache
) + (mapcache_entries
- 1);
694 map
->pr_vaddr
= str2ulong (&str
);
696 unsigned long eaddr
= str2ulong (&str
);
698 map
->pr_size
= eaddr
- map
->pr_vaddr
;
700 map
->pr_mflags
+= (*str
++ == 'r' ? PROT_READ
: 0);
701 map
->pr_mflags
+= (*str
++ == 'w' ? PROT_WRITE
: 0);
702 map
->pr_mflags
+= (*str
++ == 'x' ? PROT_EXEC
: 0);
703 map
->pr_mflags
+= (*str
++ == 's' ? MA_SHARED
: 0);
705 map
->pr_offset
= str2ulong (&str
);
707 map
->pr_dev
= str2ulong (&str
) * 0x100;
709 map
->pr_dev
+= str2ulong (&str
);
711 map
->pr_ino
= str2ulong (&str
);
712 if (map
->pr_dev
== 0)
713 map
->pr_mflags
|= MA_ANON
;
716 map
->pr_mapname
= str
;
717 map
->pr_pagesize
= 4096;
720 /* Compare two maps and record all differences */
722 MapInfo
*prev
= &mmaps
;
723 MapInfo
*oldp
= mmaps
.next
;
726 prmap_t
*newp
= nidx
< mapcache_entries
?
727 (prmap_t
*) mapcache
+ nidx
: NULL
;
728 if (oldp
== NULL
&& newp
== NULL
)
731 /* If two maps are equal proceed to the next pair */
733 oldp
->vaddr
== newp
->pr_vaddr
&&
734 oldp
->size
== newp
->pr_size
&&
735 __collector_strcmp (oldp
->mapname
, newp
->pr_mapname
) == 0)
742 /* Check if we need to unload the old map first */
743 if (newp
== NULL
|| (oldp
&& oldp
->vaddr
<= newp
->pr_vaddr
))
747 /* Don't record MA_ANON maps except MA_STACK and MA_BREAK */
748 if ((!(oldp
->mflags
& MA_ANON
) || (oldp
->mflags
& (MA_STACK
| MA_BREAK
))))
749 record_segment_unmap (hrt
, oldp
->vaddr
);
750 /* Remove and free map */
751 prev
->next
= oldp
->next
;
754 __collector_freeCSize (__collector_heap
, tmp
, sizeof (*tmp
));
759 MapInfo
*map
= (MapInfo
*) __collector_allocCSize (__collector_heap
, sizeof (MapInfo
), 1);
762 __collector_mutex_unlock (&map_lock
);
765 map
->vaddr
= newp
->pr_vaddr
;
766 map
->size
= newp
->pr_size
;
767 map
->offset
= newp
->pr_offset
;
768 map
->mflags
= newp
->pr_mflags
;
769 map
->pagesize
= newp
->pr_pagesize
;
770 resolve_mapname (map
, newp
->pr_mapname
);
773 map
->next
= prev
->next
;
777 /* Don't record MA_ANON maps except MA_STACK and MA_BREAK */
778 if (!(newp
->pr_mflags
& MA_ANON
) || (newp
->pr_mflags
& (MA_STACK
| MA_BREAK
)))
780 unsigned checksum
= checksum_mapname (map
);
781 record_segment_map (hrt
, map
->vaddr
, map
->size
,
782 map
->pagesize
, map
->mflags
,
783 map
->offset
, checksum
, map
->filename
);
788 TprintfT (DBG_LT2
, "update_map_segments: done\n\n");
789 __collector_mutex_unlock (&map_lock
);
790 } /* update_map_segments */
793 * Map addr to a segment. Cope with split segments.
796 __collector_check_segment_internal (unsigned long addr
, unsigned long *base
,
797 unsigned long *end
, int maxnretries
, int MA_FLAGS
)
799 int number_of_tries
= 0;
803 unsigned long curbase
= 0;
804 unsigned long curfoff
= 0;
805 unsigned long cursize
= 0;
808 for (mp
= mmaps
.next
; mp
; mp
= mp
->next
)
811 if (curbase
+ cursize
== mp
->vaddr
&&
812 curfoff
+ cursize
== mp
->offset
&&
813 ((mp
->mflags
& MA_FLAGS
) == MA_FLAGS
814 || __collector_strncmp (mp
->mapname
, "[vdso]", 6) == 0
815 || __collector_strncmp (mp
->mapname
, "[vsyscall]", 10) == 0
817 cursize
= mp
->vaddr
+ mp
->size
- curbase
;
818 else if (addr
< mp
->vaddr
)
820 else if ((mp
->mflags
& MA_FLAGS
) != MA_FLAGS
821 && __collector_strncmp (mp
->mapname
, "[vdso]", 6)
822 && __collector_strncmp (mp
->mapname
, "[vsyscall]", 10))
831 curfoff
= mp
->offset
;
836 if (addr
>= curbase
&& addr
< curbase
+ cursize
)
839 *end
= curbase
+ cursize
;
844 * 21275311 Unwind failure in native stack for java application running on jdk8 on x86
846 * On JDK8, we've observed cases where Java-compiled methods end up
847 * in virtual address segments that were "dead zones" (mflags&PROT_READ==0) at
848 * the time of the last update_map_segments() but are now "live". So if we
849 * fail to find a segment, let's call update_map_segments and then retry
852 if (number_of_tries
< maxnretries
)
855 __collector_ext_update_map_segments ();
864 * Check if address belongs to a readable and executable segment
869 * @return 1 - yes, 0 - no
872 __collector_check_segment (unsigned long addr
, unsigned long *base
,
873 unsigned long *end
, int maxnretries
)
875 int MA_FLAGS
= PROT_READ
| PROT_EXEC
;
876 int res
= __collector_check_segment_internal (addr
, base
, end
, maxnretries
, MA_FLAGS
);
881 * Check if address belongs to a readable segment
886 * @return 1 - yes, 0 - no
889 __collector_check_readable_segment( unsigned long addr
, unsigned long *base
, unsigned long *end
, int maxnretries
)
891 int MA_FLAGS
= PROT_READ
;
892 int res
= __collector_check_segment_internal(addr
, base
, end
, maxnretries
, MA_FLAGS
);
896 static ELF_AUX
*auxv
= NULL
;
899 process_vsyscall_page ()
901 TprintfT (DBG_LT2
, "process_vsyscall_page()\n");
904 /* We've done this one in this process, and cached the results */
905 /* use the cached results */
906 for (int i
= 0; i
< ndyn
; i
++)
908 append_segment_record ("<event kind=\"map\" object=\"dynfunc\" name=\"%s\" "
909 "vaddr=\"0x%016lX\" size=\"%u\" funcname=\"%s\" />\n",
910 dynname
[i
], dynvaddr
[i
], dynsize
[i
], dynfuncname
[i
]);
911 TprintfT (DBG_LT2
, "process_vsyscall_page: append_segment_record map dynfunc='%s' vaddr=0x%016lX size=%ld funcname='%s' -- from cache\n",
912 dynname
[i
], (unsigned long) dynvaddr
[i
],
913 (long) dynsize
[i
], dynfuncname
[i
]);
918 /* We've done this one in this process, and cached the results */
919 /* use the cached results */
920 hrtime_t hrt
= GETRELTIME ();
921 for (int i
= 0; i
< nvsysfuncs
; i
++)
923 append_segment_record ("<event kind=\"map\" object=\"function\" tstamp=\"%u.%09u\" "
924 "vaddr=\"0x%016lX\" size=\"%u\" name=\"%s\" />\n",
925 (unsigned) (hrt
/ NANOSEC
), (unsigned) (hrt
% NANOSEC
),
926 (unsigned long) sysfuncvaddr
[i
], (unsigned) sysfuncsize
[i
], sysfuncname
[i
]);
927 TprintfT (DBG_LT2
, "process_vsyscall_page: append_segment_record map function='%s' vaddr=0x%016lX size=%ld -- from cache\n",
928 sysfuncname
[i
], (unsigned long) sysfuncvaddr
[i
], (long) sysfuncsize
[i
]);
931 if (ndyn
+ nvsysfuncs
!= 0)
934 /* After fork we can't rely on environ as it might have
935 * been moved by putenv(). Use the pointer saved by the parent.
939 char **envp
= (char**) environ
;
942 while (*envp
++ != NULL
);
943 auxv
= (ELF_AUX
*) envp
;
945 TprintfT (DBG_LT2
, "process_vsyscall_page, auxv = ox%p\n", auxv
);
949 for (ap
= auxv
; ap
->a_type
!= AT_NULL
; ap
++)
950 TprintfT (DBG_LT2
, "process_vsyscall_page: ELF_AUX: "
951 " a_type = 0x%016llx %10lld "
952 " a_un.a_val = 0x%016llx %10lld\n",
953 (long long) ap
->a_type
, (long long) ap
->a_type
,
954 (long long) ap
->a_un
.a_val
, (long long) ap
->a_un
.a_val
);
957 // find the first ELF_AUX of type AT_SYSINFO_EHDR
958 ELF_EHDR
*ehdr
= NULL
;
959 for (ap
= auxv
; ap
->a_type
!= AT_NULL
; ap
++)
961 if (ap
->a_type
== AT_SYSINFO_EHDR
)
963 // newer Linuxes do not have a_ptr field, they just have a_val
964 ehdr
= (ELF_EHDR
*)(intptr_t) ap
->a_un
.a_val
;
973 char *mapName
= "SYSINFO_EHDR";
975 for (mp
= mmaps
.next
; mp
; mp
= mp
->next
)
977 if ((unsigned long) ehdr
== mp
->vaddr
)
979 mp
->mflags
|= PROT_EXEC
;
980 if (mp
->mapname
&& mp
->mapname
[0])
981 mapName
= mp
->mapname
;
986 // Find the dynsym section and record all symbols
987 char *base
= (char*) ehdr
;
988 ELF_SHDR
*shdr
= (ELF_SHDR
*) (base
+ ehdr
->e_shoff
);
992 TprintfT (DBG_LT2
, "process_vsyscall_page: ehdr: EI_CLASS=%lld EI_DATA=%lld EI_OSABI=%lld e_type=%lld e_machine=%lld e_version=%lld\n"
993 " e_entry =0x%016llx %10lld e_phoff =0x%016llx %10lld\n"
994 " e_shoff =0x%016llx %10lld e_flags =0x%016llx %10lld\n"
995 " e_ehsize =0x%016llx %10lld e_phentsize =0x%016llx %10lld\n"
996 " e_phnum =0x%016llx %10lld e_shentsize =0x%016llx %10lld\n"
997 " e_shnum =0x%016llx %10lld e_shstrndx =0x%016llx %10lld\n",
998 (long long) ehdr
->e_ident
[EI_CLASS
], (long long) ehdr
->e_ident
[EI_DATA
], (long long) ehdr
->e_ident
[EI_OSABI
],
999 (long long) ehdr
->e_type
, (long long) ehdr
->e_machine
, (long long) ehdr
->e_version
,
1000 (long long) ehdr
->e_entry
, (long long) ehdr
->e_entry
,
1001 (long long) ehdr
->e_phoff
, (long long) ehdr
->e_phoff
,
1002 (long long) ehdr
->e_shoff
, (long long) ehdr
->e_shoff
,
1003 (long long) ehdr
->e_flags
, (long long) ehdr
->e_flags
,
1004 (long long) ehdr
->e_ehsize
, (long long) ehdr
->e_ehsize
,
1005 (long long) ehdr
->e_phentsize
, (long long) ehdr
->e_phentsize
,
1006 (long long) ehdr
->e_phnum
, (long long) ehdr
->e_phnum
,
1007 (long long) ehdr
->e_shentsize
, (long long) ehdr
->e_shentsize
,
1008 (long long) ehdr
->e_shnum
, (long long) ehdr
->e_shnum
,
1009 (long long) ehdr
->e_shstrndx
, (long long) ehdr
->e_shstrndx
);
1010 for (i
= 1; i
< ehdr
->e_shnum
; i
++)
1012 TprintfT (DBG_LT2
, "process_vsyscall_page: SECTION=%d sh_name=%lld '%s'\n"
1013 " sh_type =0x%016llx %10lld\n"
1014 " sh_flags =0x%016llx %10lld\n"
1015 " sh_addr =0x%016llx %10lld\n"
1016 " sh_offset =0x%016llx %10lld\n"
1017 " sh_size =0x%016llx %10lld\n"
1018 " sh_link =0x%016llx %10lld\n"
1019 " sh_info =0x%016llx %10lld\n"
1020 " sh_addralign =0x%016llx %10lld\n"
1021 " sh_entsize =0x%016llx %10lld\n",
1022 i
, (long long) shdr
[i
].sh_name
, base
+ shdr
[ehdr
->e_shstrndx
].sh_offset
+ shdr
[i
].sh_name
,
1023 (long long) shdr
[i
].sh_type
, (long long) shdr
[i
].sh_type
,
1024 (long long) shdr
[i
].sh_flags
, (long long) shdr
[i
].sh_flags
,
1025 (long long) shdr
[i
].sh_addr
, (long long) shdr
[i
].sh_addr
,
1026 (long long) shdr
[i
].sh_offset
, (long long) shdr
[i
].sh_offset
,
1027 (long long) shdr
[i
].sh_size
, (long long) shdr
[i
].sh_size
,
1028 (long long) shdr
[i
].sh_link
, (long long) shdr
[i
].sh_link
,
1029 (long long) shdr
[i
].sh_info
, (long long) shdr
[i
].sh_info
,
1030 (long long) shdr
[i
].sh_addralign
, (long long) shdr
[i
].sh_addralign
,
1031 (long long) shdr
[i
].sh_entsize
, (long long) shdr
[i
].sh_entsize
);
1036 for (i
= 1; i
< ehdr
->e_shnum
; i
++)
1037 if (shdr
[i
].sh_type
== SHT_DYNSYM
)
1044 char *symbase
= base
+ shdr
[shdr
[dynSec
].sh_link
].sh_offset
;
1045 ELF_SYM
*symbols
= (ELF_SYM
*) (base
+ shdr
[dynSec
].sh_offset
);
1047 int n
= shdr
[dynSec
].sh_size
/ shdr
[dynSec
].sh_entsize
;
1048 for (i
= 0; i
< n
; i
++)
1050 ELF_SYM
*sym
= symbols
+ i
;
1051 TprintfT (DBG_LT2
, "process_vsyscall_page: symbol=%d st_name=%lld '%s'\n"
1052 " st_size = 0x%016llx %10lld\n"
1053 " st_value = 0x%016llx %10lld\n"
1054 " st_shndx = 0x%016llx %10lld\n"
1055 " st_info = 0x%016llx %10lld\n",
1056 i
, (long long) sym
->st_name
, symbase
+ sym
->st_name
,
1057 (long long) sym
->st_size
, (long long) sym
->st_size
,
1058 (long long) sym
->st_value
, (long long) sym
->st_value
,
1059 (long long) sym
->st_shndx
, (long long) sym
->st_shndx
,
1060 (long long) sym
->st_info
, (long long) sym
->st_info
);
1061 if (sym
->st_shndx
<= 0 || sym
->st_size
<= 0 ||
1062 ELF_ST_BIND (sym
->st_info
) != STB_GLOBAL
|| ELF_ST_TYPE (sym
->st_info
) != STT_FUNC
)
1065 nextSec
= sym
->st_shndx
;
1066 else if (nextSec
> sym
->st_shndx
)
1067 nextSec
= sym
->st_shndx
;
1072 while (nextSec
!= 0)
1074 int curSec
= nextSec
;
1075 char *bgn
= base
+ shdr
[curSec
].sh_offset
;
1076 char *end
= bgn
+ shdr
[curSec
].sh_size
;
1077 for (i
= 0; i
< n
; i
++)
1079 ELF_SYM
*sym
= symbols
+ i
;
1080 if (sym
->st_shndx
<= 0 || sym
->st_size
<= 0 ||
1081 ELF_ST_BIND (sym
->st_info
) != STB_GLOBAL
|| ELF_ST_TYPE (sym
->st_info
) != STT_FUNC
)
1083 if (sym
->st_shndx
> curSec
)
1085 if (nextSec
== curSec
)
1086 nextSec
= sym
->st_shndx
;
1087 else if (nextSec
> sym
->st_shndx
)
1088 nextSec
= sym
->st_shndx
;
1089 nextSec
= sym
->st_shndx
;
1092 if (sym
->st_shndx
!= curSec
)
1094 long long st_delta
= (sym
->st_value
>= shdr
[sym
->st_shndx
].sh_addr
) ?
1095 (sym
->st_value
- shdr
[sym
->st_shndx
].sh_addr
) : -1;
1096 char *st_value
= bgn
+ st_delta
;
1097 if (st_delta
>= 0 && st_value
+ sym
->st_size
<= end
)
1099 append_segment_record ("<event kind=\"map\" object=\"dynfunc\" name=\"%s\" "
1100 "vaddr=\"0x%016lX\" size=\"%u\" funcname=\"%s\" />\n",
1101 mapName
, (void*) st_value
, sym
->st_size
, symbase
+ sym
->st_name
);
1103 TprintfT (DBG_LT2
, "process_vsyscall_page: append_segment_record map dynfunc='%s' vaddr=%016lX size=%ld funcname='%s'\n",
1104 mapName
, (unsigned long) st_value
,
1105 (long) sym
->st_size
, symbase
+ sym
->st_name
);
1107 /* now cache this for a subsequent experiment */
1109 __collector_log_write ("<event kind=\"%s\" id=\"%d\">MAXDYN=%d</event>\n",
1110 SP_JCMD_CERROR
, COL_ERROR_MAPCACHE
, MAXDYN
);
1113 dynname
[ndyn
] = CALL_UTIL (libc_strdup
)(mapName
);
1114 dynvaddr
[ndyn
] = (void *) st_value
;
1115 dynsize
[ndyn
] = (unsigned) sym
->st_size
;
1116 dynfuncname
[ndyn
] = CALL_UTIL (libc_strdup
)(symbase
+ sym
->st_name
);
1117 TprintfT (DBG_LT2
, "process_vsyscall_page: cached entry %d map function='%s' vaddr=0x%016lX size=%ld '%s'\n",
1118 ndyn
, dynname
[ndyn
], (unsigned long) dynvaddr
[ndyn
],
1119 (long) dynsize
[ndyn
], dynfuncname
[ndyn
]);
1124 __collector_int_func_load (DFUNC_KERNEL
, mapName
, NULL
,
1125 (void*) (base
+ shdr
[curSec
].sh_offset
), shdr
[curSec
].sh_size
, 0, NULL
);
1127 /* now cache this function for a subsequent experiment */
1128 if (nvsysfuncs
>= MAXVSYSFUNCS
)
1129 __collector_log_write ("<event kind=\"%s\" id=\"%d\">MAXVSYSFUNCS=%d</event>\n",
1130 SP_JCMD_CERROR
, COL_ERROR_MAPCACHE
, MAXVSYSFUNCS
);
1133 sysfuncname
[nvsysfuncs
] = CALL_UTIL (libc_strdup
)(mapName
);
1134 sysfuncvaddr
[nvsysfuncs
] = (unsigned long) (base
+ shdr
[curSec
].sh_offset
);
1135 sysfuncsize
[nvsysfuncs
] = (unsigned long) (shdr
[curSec
].sh_size
);
1136 TprintfT (DBG_LT2
, "process_vsyscall_page: cached entry %d map function='%s' vaddr=0x%016lX size=%ld\n",
1137 nvsysfuncs
, sysfuncname
[nvsysfuncs
],
1138 (unsigned long) sysfuncvaddr
[nvsysfuncs
],
1139 (long) sysfuncsize
[nvsysfuncs
]);
1142 TprintfT (DBG_LT2
, "process_vsyscall_page: collector_int_func_load='%s' vaddr=0x%016lX size=%ld\n",
1143 mapName
, (unsigned long) (base
+ shdr
[curSec
].sh_offset
),
1144 (long) shdr
[curSec
].sh_size
);
1145 if (curSec
== nextSec
)
1152 unsigned long vsysaddr
= (unsigned long) 0xffffe000;
1154 unsigned long vsysaddr
= (unsigned long) 0xffffffffff600000;
1156 // Make sure the vsyscall map has PROT_EXEC
1158 for (mp
= mmaps
.next
; mp
; mp
= mp
->next
)
1160 TprintfT (DBG_LT2
, "MapInfo: vaddr=0x%016llx [size=%lld] mflags=0x%llx offset=%lld pagesize=%lld\n"
1161 " mapname='%s' filename='%s'\n",
1162 (unsigned long long) mp
->vaddr
, (long long) mp
->size
,
1163 (long long) mp
->mflags
, (long long) mp
->offset
, (long long) mp
->pagesize
,
1164 mp
->mapname
? mp
->mapname
: "NULL",
1165 mp
->filename
? mp
->filename
: "NULL");
1166 if (vsysaddr
== mp
->vaddr
)
1167 mp
->mflags
|= PROT_EXEC
;
1168 if ((unsigned long) ehdr
== (unsigned long) mp
->vaddr
)
1170 if (__collector_strncmp (mp
->mapname
, "[vdso]", 6) == 0
1171 || __collector_strncmp (mp
->mapname
, "[vsyscall]", 10) == 0)
1174 * On rubbia ( 2.6.9-5.ELsmp #1 SMP 32-bit ) access to ehdr causes SEGV.
1175 * There doesn't seem to be a way to reliably determine the actual presence
1176 * of the page: even when /proc reports it's there it can't be accessed.
1177 * We will have to put up with <Unknown> on some Linuxes until this is resolved.
1178 __collector_int_func_load(DFUNC_KERNEL, mp->mapname, NULL, (void*) mp->vaddr, mp->size, 0, NULL);
1180 hrtime_t hrt
= GETRELTIME ();
1181 append_segment_record (
1182 "<event kind=\"map\" object=\"function\" tstamp=\"%u.%09u\" "
1183 "vaddr=\"0x%016lX\" size=\"%u\" name=\"%s\" />\n",
1184 (unsigned) (hrt
/ NANOSEC
), (unsigned) (hrt
% NANOSEC
),
1185 (unsigned long) mp
->vaddr
, (unsigned) mp
->size
, mp
->mapname
);
1186 TprintfT (DBG_LT2
, "process_vsyscall_page: append_segment_record map function = %s, vaddr = 0x%016lX, size = %u\n",
1187 mp
->mapname
, (unsigned long) mp
->vaddr
, (unsigned) mp
->size
);
1189 /* now cache this function for a subsequent experiment */
1190 if (nvsysfuncs
>= MAXVSYSFUNCS
)
1191 __collector_log_write ("<event kind=\"%s\" id=\"%d\">MAXVSYSFUNCS=%d</event>\n",
1192 SP_JCMD_CERROR
, COL_ERROR_MAPCACHE
, MAXVSYSFUNCS
);
1195 sysfuncname
[nvsysfuncs
] = CALL_UTIL (libc_strdup
)(mp
->mapname
);
1196 sysfuncvaddr
[nvsysfuncs
] = mp
->vaddr
;
1197 sysfuncsize
[nvsysfuncs
] = (unsigned long) mp
->size
;
1198 TprintfT (DBG_LT2
, "process_vsyscall_page: cached entry %d map function='%s' vaddr=0x%016lX size=%ld\n",
1199 nvsysfuncs
, sysfuncname
[nvsysfuncs
],
1200 (unsigned long) sysfuncvaddr
[nvsysfuncs
],
1201 (long) sysfuncsize
[nvsysfuncs
]);
1210 * collector API for dynamic functions
/* Public collector API entry point for registering a dynamically
   generated (JIT) function; weak alias onto the internal handler so
   applications may call collector_func_load() directly.  */
void collector_func_load () __attribute__ ((weak, alias ("__collector_func_load")));

/* Register a dynamic function with the collector.
   NAME        function name (may be NULL; internal handler generates one)
   ALIAS       unused here — passed by the public API but not forwarded
   SOURCENAME  originating source file, may be NULL
   VADDR/SIZE  code range of the generated function
   LNTSIZE/LNTABLE  its line-number table.
   Delegates to __collector_int_func_load() in DFUNC_API mode.  */
void
__collector_func_load (char *name, char *alias, char *sourcename,
                       void *vaddr, int size, int lntsize, DT_lineno *lntable)
{
  __collector_int_func_load (DFUNC_API, name, sourcename,
                             vaddr, size, lntsize, lntable);
}
/* Public collector API entry point for unregistering a dynamic
   function; weak alias onto the internal handler.  */
void collector_func_unload () __attribute__ ((weak, alias ("__collector_func_unload")));

/* Record that the dynamic function previously loaded at VADDR has been
   unloaded.  Delegates to __collector_int_func_unload() in DFUNC_API mode.  */
void
__collector_func_unload (void *vaddr)
{
  __collector_int_func_unload (DFUNC_API, vaddr);
}
1228 /* routines for handling dynamic functions */
/* Robust write helper for the "dyntext" file: write all NBYTE bytes of
   BUF to FD, retrying on short writes.  On write failure, logs a
   COL_ERROR_DYNWRITE event naming dyntext_fname and gives up.
   NOTE(review): the loop control and return values below were
   reconstructed — the extracted source dropped those lines; confirm
   against upstream libcollector/mmaptrace.c.  */
static int
rwrite (int fd, void *buf, size_t nbyte)
{
  size_t left = nbyte;
  size_t res;
  char *ptr = (char*) buf;
  while (left > 0)
    {
      res = CALL_UTIL (write)(fd, ptr, left);
      if (res == -1)
	{
	  TprintfT (0, "ERROR: rwrite(%s) failed: errno=%d\n", dyntext_fname, errno);
	  (void) __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s</event>\n",
					SP_JCMD_CERROR, COL_ERROR_DYNWRITE, errno, dyntext_fname);
	  return 1;
	}
      left -= res;
      ptr += res;
    }
  return 0;
}
/* Internal handler for dynamic-function registration.
   MODE selects the caller (DFUNC_API, DFUNC_JAVA, DFUNC_KERNEL) and
   controls both the record kind emitted and whether the code bytes are
   copied before writing.  Emits a "map" segment record, validates the
   containing segment, then appends header/code/source/line-table
   records to the per-experiment "dyntext" file under dyntext_lock.
   NOTE(review): several structural lines (guards, braces, field
   assignments) were missing from the extracted source and have been
   reconstructed; verify against upstream libcollector/mmaptrace.c.  */
static void
__collector_int_func_load (dfunc_mode_t mode, char *name, char *sourcename,
			   void *vaddr, int size, int lntsize, DT_lineno *lntable)
{
  char name_buf[32];
  int slen;
  static char pad[16];   /* zero padding to 16-byte-align dyntext records */
  int padn;
  hrtime_t hrt = GETRELTIME ();

  if (name == NULL)
    {
      /* generate a name based on vaddr */
      CALL_UTIL (snprintf)(name_buf, sizeof (name_buf), "0x%lx", (unsigned long) vaddr);
      name = name_buf;
    }

  switch (mode)
    {
    case DFUNC_API:
    case DFUNC_KERNEL:
      append_segment_record ("<event kind=\"map\" object=\"function\" tstamp=\"%u.%09u\" "
			     "vaddr=\"0x%016lX\" size=\"%u\" name=\"%s\" />\n",
			     (unsigned) (hrt / NANOSEC), (unsigned) (hrt % NANOSEC),
			     (unsigned long) vaddr, (unsigned) size, name);
      break;
    case DFUNC_JAVA:
      /* for Java, "name" carries the method id, hence methodId=  */
      append_segment_record ("<event kind=\"map\" object=\"jcm\" tstamp=\"%u.%09u\" "
			     "vaddr=\"0x%016lX\" size=\"%u\" methodId=\"%s\" />\n",
			     (unsigned) (hrt / NANOSEC), (unsigned) (hrt % NANOSEC),
			     (unsigned long) vaddr, (unsigned) size, name);
      break;
    default:
      return;
    }

  /* 21275311 Unwind failure in native stack for java application running on jdk8 on x86
   * Check that:
   * - function starts in a known segment (base1 != 0)
   * - function ends in the same segment (base1==base2 && end1==end2)
   * If not, then call update_map_segments().
   */
  unsigned long base1, end1, base2, end2;
  __collector_check_segment ((unsigned long) vaddr, &base1, &end1, 0);
  if (base1)
    __collector_check_segment (((unsigned long) vaddr)+((unsigned long) size), &base2, &end2, 0);
  if (base1 == 0 || base1 != base2 || end1 != end2)
    __collector_ext_update_map_segments ();

  /* Write a copy of actual code to the "dyntext" file */
  DT_header dt_hdr;
  dt_hdr.type = DT_HEADER;
  dt_hdr.size = sizeof (dt_hdr);
  dt_hdr.time = hrt;   /* NOTE(review): field name reconstructed — confirm */
  unsigned long t = (unsigned long) vaddr; /* to suppress a warning from gcc */
  dt_hdr.vaddr = (uint64_t) t;

  DT_code dt_code;
  dt_code.type = DT_CODE;
  dt_code.size = 0;
  void *code = vaddr;
  if (vaddr != NULL && size > 0)
    {
      /* record size is the code rounded up to a 16-byte boundary */
      dt_code.size = sizeof (dt_code) + ((size + 0xf) & ~0xf);
      if (mode == DFUNC_KERNEL)
	{
	  /* Some Linuxes don't accept vaddrs from the vsyscall
	   * page in write(). Make a copy.
	   */
	  code = alloca (size);
	  __collector_memcpy (code, vaddr, size);
	}
    }

  DT_srcfile dt_src;
  dt_src.type = DT_SRCFILE;
  if (sourcename)
    {
      slen = CALL_UTIL (strlen)(sourcename) + 1;
      dt_src.size = slen ? sizeof (dt_src) + ((slen + 0xf) & ~0xf) : 0;
    }
  else
    {
      slen = 0;
      dt_src.size = 0;
    }

  DT_ltable dt_ltbl;
  dt_ltbl.type = DT_LTABLE;
  dt_ltbl.size = 0;
  if (lntable != NULL && lntsize > 0)
    dt_ltbl.size = sizeof (dt_ltbl) + lntsize * sizeof (DT_lineno);

  int fd = CALL_UTIL (open)(dyntext_fname, O_RDWR | O_APPEND);
  if (fd < 0)
    {
      TprintfT (0, "ERROR: __collector_int_func_load: open(%s) failed: errno=%d\n",
		dyntext_fname, errno);
      (void) __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s</event>\n",
				    SP_JCMD_CERROR, COL_ERROR_DYNOPEN, errno, dyntext_fname);
      return;
    }

  /* Lock the whole file */
  __collector_mutex_lock (&dyntext_lock);
  rwrite (fd, &dt_hdr, sizeof (dt_hdr));
  if (dt_code.size)
    {
      padn = dt_code.size - sizeof (dt_code) - size;
      rwrite (fd, &dt_code, sizeof (dt_code));
      rwrite (fd, code, size);
      rwrite (fd, &pad, padn);
    }
  if (dt_src.size)
    {
      padn = dt_src.size - sizeof (dt_src) - slen;
      rwrite (fd, &dt_src, sizeof (dt_src));
      rwrite (fd, sourcename, slen);
      rwrite (fd, &pad, padn);
    }
  if (dt_ltbl.size)
    {
      rwrite (fd, &dt_ltbl, sizeof (dt_ltbl));
      rwrite (fd, lntable, dt_ltbl.size - sizeof (dt_ltbl));
    }

  /* Unlock the file */
  __collector_mutex_unlock( &dyntext_lock );
  CALL_UTIL(close( fd ) );
}
/* Internal handler for dynamic-function unload events.  Emits an
   "unmap" segment record; for DFUNC_JAVA the VADDR argument is really
   a method id, not a code address.  Unknown modes are ignored.
   NOTE(review): enclosing braces reconstructed from a mangled source;
   confirm against upstream mmaptrace.c.  */
static void
__collector_int_func_unload (dfunc_mode_t mode, void *vaddr)
{
  hrtime_t hrt = GETRELTIME ();
  if (mode == DFUNC_API)
    append_segment_record ("<event kind=\"unmap\" tstamp=\"%u.%09u\" vaddr=\"0x%016lX\"/>\n",
			   (unsigned) (hrt / NANOSEC), (unsigned) (hrt % NANOSEC), (unsigned long) vaddr);
  else if (mode == DFUNC_JAVA)
    /* note that the "vaddr" is really a method id, not an address */
    append_segment_record ("<event kind=\"unmap\" tstamp=\"%u.%09u\" methodId=\"0x%016lX\"/>\n",
			   (unsigned) (hrt / NANOSEC), (unsigned) (hrt % NANOSEC), (unsigned long) vaddr);
}
/*
 * int init_mmap_intf()
 *      Set up interposition (if not already done).
 *
 * Resolves the real mmap/mmap64/munmap/dlopen/dlclose entry points via
 * dlsym/dlvsym.  dlopen and dlclose are looked up per glibc symbol
 * version (newest first) so the interposer can forward to the same
 * version the application would have bound to; an unversioned dlsym
 * lookup is the fallback.  Returns nonzero on failure.
 * NOTE(review): early-return paths and the else-branches of the version
 * chains were reconstructed from a mangled source — confirm upstream.
 */
static int
init_mmap_intf ()
{
  if (__collector_dlsym_guard)
    return 1;
  void *dlflag = RTLD_NEXT;
  __real_mmap = dlsym (dlflag, "mmap");
  if (__real_mmap == NULL)
    {
      /* We are probably dlopened after libthread/libc,
       * try to search in the previously loaded objects
       */
      __real_mmap = dlsym (RTLD_DEFAULT, "mmap");
      if (__real_mmap == NULL)
	{
	  TprintfT (0, "ERROR: collector real mmap not found\n");
	  return 1;
	}
      TprintfT (DBG_LT2, "collector real mmap found with RTLD_DEFAULT\n");
      dlflag = RTLD_DEFAULT;
    }
  __real_mmap64 = dlsym (dlflag, "mmap64");
  __real_munmap = dlsym (dlflag, "munmap");

  // dlopen/dlmopen/dlclose are in libdl.so
  __real_dlopen_2_34 = dlvsym (dlflag, "dlopen", "GLIBC_2.34");
  __real_dlopen_2_17 = dlvsym (dlflag, "dlopen", "GLIBC_2.17");
  __real_dlopen_2_2_5 = dlvsym (dlflag, "dlopen", "GLIBC_2.2.5");
  __real_dlopen_2_1 = dlvsym (dlflag, "dlopen", "GLIBC_2.1");
  __real_dlopen_2_0 = dlvsym (dlflag, "dlopen", "GLIBC_2.0");
  /* pick the newest available versioned dlopen */
  if (__real_dlopen_2_34)
    __real_dlopen = __real_dlopen_2_34;
  else if (__real_dlopen_2_17)
    __real_dlopen = __real_dlopen_2_17;
  else if (__real_dlopen_2_2_5)
    __real_dlopen = __real_dlopen_2_2_5;
  else if (__real_dlopen_2_1)
    __real_dlopen = __real_dlopen_2_1;
  else if (__real_dlopen_2_0)
    __real_dlopen = __real_dlopen_2_0;
  else
    __real_dlopen = dlsym (dlflag, "dlopen");

  __real_dlclose_2_34 = dlvsym (dlflag, "dlclose", "GLIBC_2.34");
  __real_dlclose_2_17 = dlvsym (dlflag, "dlclose", "GLIBC_2.17");
  __real_dlclose_2_2_5 = dlvsym (dlflag, "dlclose", "GLIBC_2.2.5");
  __real_dlclose_2_0 = dlvsym (dlflag, "dlclose", "GLIBC_2.0");
  /* pick the newest available versioned dlclose */
  if (__real_dlclose_2_34)
    __real_dlclose = __real_dlclose_2_34;
  else if (__real_dlclose_2_17)
    __real_dlclose = __real_dlclose_2_17;
  else if (__real_dlclose_2_2_5)
    __real_dlclose = __real_dlclose_2_2_5;
  else if (__real_dlclose_2_0)
    __real_dlclose = __real_dlclose_2_0;
  else
    __real_dlclose = dlsym (dlflag, "dlclose");

#define PR_FUNC(f) TprintfT (DBG_LT2, " mmaptrace.c: " #f ": @%p\n", f)
  PR_FUNC (__real_dlclose);
  PR_FUNC (__real_dlclose_2_0);
  PR_FUNC (__real_dlclose_2_17);
  PR_FUNC (__real_dlclose_2_2_5);
  PR_FUNC (__real_dlclose_2_34);
  PR_FUNC (__real_dlopen);
  PR_FUNC (__real_dlopen_2_0);
  PR_FUNC (__real_dlopen_2_1);
  PR_FUNC (__real_dlopen_2_17);
  PR_FUNC (__real_dlopen_2_2_5);
  PR_FUNC (__real_dlopen_2_34);
  PR_FUNC (__real_mmap);
  PR_FUNC (__real_mmap64);
  PR_FUNC (__real_munmap);
#undef PR_FUNC
  return 0;
}
/*------------------------------------------------------------- mmap */

/* Interposer for mmap(2): forwards to the real mmap, then — when not
   reentering and a heap-tracing callback is installed — emits an
   MMAP_TRACE record for the new mapping.
   NOTE(review): the init-failure return and the reentrancy guard
   push/pop were reconstructed from a mangled source — confirm upstream.  */
void *
mmap (void *start, size_t length, int prot, int flags, int fd, off_t offset)
{
  int err = 0;
  if (NULL_PTR (mmap))
    err = init_mmap_intf ();
  if (err)
    return MAP_FAILED;

  /* hrtime_t hrt = GETRELTIME(); */
  void *ret = CALL_REAL (mmap)(start, length, prot, flags, fd, offset);
  if (!CHCK_REENTRANCE && (ret != MAP_FAILED) && collector_heap_record != NULL)
    {
      PUSH_REENTRANCE;
      /* write a separate record for mmap tracing */
      collector_heap_record (MMAP_TRACE, length, ret);
      POP_REENTRANCE;
    }
  TprintfT (DBG_LT2, "libcollector.mmap(%p, %ld, %d, %d, %d, 0x%lld) = %p\n",
	    start, (long) length, prot, flags, fd, (long long) offset, ret);
  return ret;
}
/*------------------------------------------------------------- mmap64 */
#if WSIZE(32) && !defined(__USE_FILE_OFFSET64)

/* Interposer for mmap64(2) — only meaningful in 32-bit builds without
   64-bit off_t.  Same tracing behavior as the mmap interposer above.
   NOTE(review): guard push/pop and return reconstructed — confirm upstream.  */
void *
mmap64 (void *start, size_t length, int prot, int flags, int fd, off64_t offset)
{
  if (NULL_PTR (mmap64))
    init_mmap_intf ();

  /* hrtime_t hrt = GETRELTIME(); */
  void *ret = CALL_REAL (mmap64)(start, length, prot, flags, fd, offset);
  if (!CHCK_REENTRANCE && (ret != MAP_FAILED) && collector_heap_record != NULL)
    {
      PUSH_REENTRANCE;
      /* write a separate record for mmap tracing */
      collector_heap_record (MMAP_TRACE, length, ret);
      POP_REENTRANCE;
    }
  TprintfT (DBG_LT2, "libcollector.mmap64(%p, %ld, %d, %d, %d, 0x%lld) = %p\n",
	    start, (long) length, prot, flags, fd, (long long) offset, ret);
  return ret;
}
#endif /* WSIZE(32) */
/*------------------------------------------------------------- munmap */

/* Interposer for munmap(2): forwards to the real munmap, then — when
   not reentering, the unmap succeeded, and a heap-tracing callback is
   installed — emits a MUNMAP_TRACE record for the removed mapping.
   NOTE(review): guard push/pop and return reconstructed — confirm upstream.  */
int
munmap (void *start, size_t length)
{
  if (NULL_PTR (munmap))
    init_mmap_intf ();

  /* hrtime_t hrt = GETRELTIME(); */
  int rc = CALL_REAL (munmap)(start, length);
  if (!CHCK_REENTRANCE && (rc == 0) && collector_heap_record != NULL)
    {
      PUSH_REENTRANCE;
      /* write a separate record for mmap tracing */
      collector_heap_record (MUNMAP_TRACE, length, start);
      POP_REENTRANCE;
    }
  TprintfT (DBG_LT2, "libcollector.munmap(%p, %ld) = %d\n", start, (long) length, rc);
  return rc;
}
/*------------------------------------------------------------- dlopen */

/* Common body for the versioned dlopen interposers.
   REAL_DLOPEN is the resolved libc entry to forward to, CALLER is the
   interposer's return address (used to resolve $ORIGIN and to search
   the caller's RPATH), PATHNAME/MODE are the user's arguments.
   Rewrites a leading "$ORIGIN/" or "${ORIGIN}/" prefix to the
   directory of the calling object (via dladdr), performs the real
   dlopen, and on success triggers a map-segment update so the new
   mappings are recorded.  dlerror() state is preserved on failure.
   NOTE(review): the origin_offset values, the surrounding braces, and
   the reentrancy-guard push/pop were reconstructed from a mangled
   source — confirm against upstream mmaptrace.c.  */
static void *
gprofng_dlopen (void*(real_dlopen) (const char *, int),
		void *caller, const char *pathname, int mode)
{
  const char * real_pathname = pathname;
  char new_pathname[MAXPATHLEN];
  int origin_offset = 0;
  TprintfT (DBG_LT2, "dlopen: pathname=%s, mode=%d\n", pathname ? pathname : "NULL", mode);
  if (pathname && __collector_strStartWith (pathname, "$ORIGIN/") == 0)
    origin_offset = 8;
  else if (pathname && __collector_strStartWith (pathname, "${ORIGIN}/") == 0)
    origin_offset = 10;
  if (origin_offset)
    {
      /* replace the $ORIGIN prefix with the caller's directory */
      Dl_info dl_info;
      if (caller && dladdr (caller, &dl_info) != 0)
	{
	  TprintfT (DBG_LT2, "dladdr(%p): %p fname=%s\n",
		    caller, dl_info.dli_fbase, dl_info.dli_fname);
	  new_pathname[0] = '\0';
	  const char *p = __collector_strrchr (dl_info.dli_fname, '/');
	  if (p)
	    __collector_strlcpy (new_pathname, dl_info.dli_fname,
				 (p - dl_info.dli_fname + 2) < MAXPATHLEN ?
				 (p - dl_info.dli_fname + 2) : MAXPATHLEN);
	  __collector_strlcat (new_pathname, pathname + origin_offset,
			       MAXPATHLEN - CALL_UTIL (strlen)(new_pathname));
	  real_pathname = new_pathname;
	}
      else
	TprintfT (0, "ERROR: dladdr(%p): %s\n", caller, dlerror ());
    }
  TprintfT (DBG_LT2, "libcollector.dlopen(%s,%d) interposing\n",
	    pathname ? pathname : "", mode);
  void *ret = NULL;

  // set guard for duration of handling dlopen, since want to ensure
  // new mappings are resolved after the actual dlopen has occurred
  PUSH_REENTRANCE;
  hrtime_t hrt = GETRELTIME ();

  /* bare library names are first searched along the caller's paths */
  if (caller && real_pathname && !__collector_strchr (real_pathname, '/'))
    ret = dlopen_searchpath (real_dlopen, caller, real_pathname, mode);

  if (ret == NULL)
    ret = real_dlopen (real_pathname, mode);
  TprintfT (DBG_LT2, "libcollector -- dlopen(%s) returning %p\n", pathname, ret);

  /* Don't call update if dlopen failed: preserve dlerror() */
  if (ret && (mmap_mode > 0) && !(mode & RTLD_NOLOAD))
    update_map_segments (hrt, 1);
  TprintfT (DBG_LT2, "libcollector -- dlopen(%s) returning %p\n", pathname, ret);
  POP_REENTRANCE;
  return ret;
}
/* Template for the exported dlopen interposers: each expansion defines
   one symbol that lazily initializes the real-function table, captures
   its caller's return address (needed for $ORIGIN / RPATH resolution),
   and forwards to gprofng_dlopen().  DCL_FUNC_VER binds an expansion to
   a specific glibc symbol version via .symver.  */
#define DCL_DLOPEN(dcl_f) \
  void *dcl_f (const char *pathname, int mode) \
  { \
    if (__real_dlopen == NULL) \
      init_mmap_intf (); \
    void *caller = __builtin_return_address (0); \
    return gprofng_dlopen (__real_dlopen, caller, pathname, mode); \
  }

DCL_FUNC_VER (DCL_DLOPEN, dlopen_2_34, dlopen@GLIBC_2.34)
DCL_FUNC_VER (DCL_DLOPEN, dlopen_2_17, dlopen@GLIBC_2.17)
DCL_FUNC_VER (DCL_DLOPEN, dlopen_2_2_5, dlopen@GLIBC_2.2.5)
DCL_FUNC_VER (DCL_DLOPEN, dlopen_2_1, dlopen@GLIBC_2.1)
DCL_FUNC_VER (DCL_DLOPEN, dlopen_2_0, dlopen@GLIBC_2.0)
/*------------------------------------------------------------- dlclose */

/* Common body for the versioned dlclose interposers.  Updates the map
   segments before the real dlclose (so the closing library's mappings
   are recorded while still present), forwards to REAL_DLCLOSE, and on
   success updates again to record the removals.  dlerror() state is
   preserved when the real dlclose fails.
   NOTE(review): the reentrancy-guard push/pop and the trailing
   return were reconstructed from a mangled source — confirm against
   upstream mmaptrace.c.  */
static int
gprofng_dlclose (int (real_dlclose) (void *), void *handle)
{
  hrtime_t hrt = GETRELTIME ();
  if (!CHCK_REENTRANCE)
    {
      PUSH_REENTRANCE;
      update_map_segments (hrt, 1);
      POP_REENTRANCE;
      hrt = GETRELTIME ();
    }
  int ret = real_dlclose (handle);

  /* Don't call update if dlclose failed: preserve dlerror() */
  if (!ret && !CHCK_REENTRANCE)
    {
      PUSH_REENTRANCE;
      update_map_segments (hrt, 1);
      POP_REENTRANCE;
    }
  TprintfT (DBG_LT2, "gprofng_dlclose @%p (%p) returning %d\n", real_dlclose,
	    handle, ret);
  return ret;
}
/* Template for the exported dlclose interposers: each expansion lazily
   initializes the real-function table and forwards to
   gprofng_dlclose().  Versioned symbols are emitted via DCL_FUNC_VER;
   the final expansion defines the default unversioned dlclose.  */
#define DCL_DLCLOSE(dcl_f) \
  int dcl_f (void *handle) \
  { \
    if (__real_dlclose == NULL) \
      init_mmap_intf (); \
    return gprofng_dlclose (__real_dlclose, handle); \
  }

DCL_FUNC_VER (DCL_DLCLOSE, dlclose_2_34, dlclose@GLIBC_2.34)
DCL_FUNC_VER (DCL_DLCLOSE, dlclose_2_17, dlclose@GLIBC_2.17)
DCL_FUNC_VER (DCL_DLCLOSE, dlclose_2_2_5, dlclose@GLIBC_2.2.5)
DCL_FUNC_VER (DCL_DLCLOSE, dlclose_2_0, dlclose@GLIBC_2.0)
DCL_DLCLOSE (dlclose)