2 * mono-profiler-log.c: mono log profiler
5 * Paolo Molaro (lupus@ximian.com)
6 * Alex Rønne Petersen (alexrp@xamarin.com)
8 * Copyright 2010 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/debug-helpers.h>
16 #include "../metadata/metadata-internals.h"
17 #include <mono/metadata/mono-config.h>
18 #include <mono/metadata/mono-gc.h>
19 #include <mono/metadata/mono-perfcounters.h>
20 #include <mono/metadata/profiler.h>
21 #include <mono/utils/atomic.h>
22 #include <mono/utils/hazard-pointer.h>
23 #include <mono/utils/lock-free-alloc.h>
24 #include <mono/utils/lock-free-queue.h>
25 #include <mono/utils/mono-conc-hashtable.h>
26 #include <mono/utils/mono-counters.h>
27 #include <mono/utils/mono-linked-list-set.h>
28 #include <mono/utils/mono-membar.h>
29 #include <mono/utils/mono-mmap.h>
30 #include <mono/utils/mono-os-mutex.h>
31 #include <mono/utils/mono-os-semaphore.h>
32 #include <mono/utils/mono-threads.h>
33 #include <mono/utils/mono-threads-api.h>
34 #include "mono-profiler-log.h"
46 #if defined(__APPLE__)
47 #include <mach/mach_time.h>
49 #include <netinet/in.h>
50 #ifdef HAVE_SYS_MMAN_H
53 #include <sys/socket.h>
54 #if defined (HAVE_SYS_ZLIB)
58 #ifdef HAVE_SCHED_GETAFFINITY
59 # ifndef GLIBC_HAS_CPU_COUNT
61 CPU_COUNT(cpu_set_t
*set
)
65 for (int i
= 0; i
< CPU_SETSIZE
; i
++)
66 if (CPU_ISSET(i
, set
))
73 #define BUFFER_SIZE (4096 * 16)
75 /* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
76 #define LEB128_SIZE 10
78 /* Size of a value encoded as a single byte. */
79 #undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
82 /* Size in bytes of the event prefix (ID + time). */
83 #define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
85 static volatile gint32 runtime_inited
;
86 static volatile gint32 in_shutdown
;
88 static gboolean no_counters
;
89 static int nocalls
= 0;
90 static int notraces
= 0;
91 static int use_zip
= 0;
92 static int do_report
= 0;
93 static int do_heap_shot
= 0;
94 static int max_call_depth
= 100;
95 static int command_port
= 0;
96 static int heapshot_requested
= 0;
97 static int sample_freq
= 0;
98 static int do_mono_sample
= 0;
99 static int do_debug
= 0;
100 static int do_coverage
= 0;
101 static gboolean only_coverage
;
102 static gboolean debug_coverage
= FALSE
;
103 static MonoProfileSamplingMode sampling_mode
= MONO_PROFILER_STAT_MODE_PROCESS
;
104 static int max_allocated_sample_hits
;
106 // Statistics for internal profiler data structures.
107 static gint32 sample_allocations_ctr
,
108 buffer_allocations_ctr
;
110 // Statistics for profiler events.
111 static gint32 sync_points_ctr
,
120 gc_handle_creations_ctr
,
121 gc_handle_deletions_ctr
,
124 finalize_object_begins_ctr
,
125 finalize_object_ends_ctr
,
129 assembly_unloads_ctr
,
134 method_exception_exits_ctr
,
137 exception_throws_ctr
,
138 exception_clauses_ctr
,
139 monitor_contentions_ctr
,
140 monitor_acquisitions_ctr
,
141 monitor_failures_ctr
,
153 counter_descriptors_ctr
,
155 perfcounter_descriptors_ctr
,
156 perfcounter_samples_ctr
,
157 coverage_methods_ctr
,
158 coverage_statements_ctr
,
159 coverage_classes_ctr
,
160 coverage_assemblies_ctr
;
162 static MonoLinkedListSet profiler_thread_list
;
168 * The file is composed by a header followed by 0 or more buffers.
169 * Each buffer contains events that happened on a thread: for a given thread
170 * buffers that appear later in the file are guaranteed to contain events
171 * that happened later in time. Buffers from separate threads could be interleaved,
173 * Buffers are not required to be aligned.
176 * [id: 4 bytes] constant value: LOG_HEADER_ID
177 * [major: 1 byte] [minor: 1 byte] major and minor version of the log profiler
178 * [format: 1 byte] version of the data format for the rest of the file
179 * [ptrsize: 1 byte] size in bytes of a pointer in the profiled program
180 * [startup time: 8 bytes] time in milliseconds since the unix epoch when the program started
181 * [timer overhead: 4 bytes] approximate overhead in nanoseconds of the timer
182 * [flags: 4 bytes] file format flags, should be 0 for now
183 * [pid: 4 bytes] pid of the profiled process
184 * [port: 2 bytes] tcp port for server if != 0
185 * [args size: 4 bytes] size of args
186 * [args: string] arguments passed to the profiler
187 * [arch size: 4 bytes] size of arch
188 * [arch: string] architecture the profiler is running on
189 * [os size: 4 bytes] size of os
190 * [os: string] operating system the profiler is running on
192 * The multiple byte integers are in little-endian format.
195 * [buffer header] [event]*
196 * Buffers have a fixed-size header followed by 0 or more bytes of event data.
197 * Timing information and other values in the event data are usually stored
198 * as uleb128 or sleb128 integers. To save space, as noted for each item below,
199 * some data is represented as a difference between the actual value and
200 * either the last value of the same type (like for timing information) or
201 * as the difference from a value stored in a buffer header.
203 * For timing information the data is stored as uleb128, since timing
204 * increases in a monotonic way in each thread: the value is the number of
205 * nanoseconds to add to the last seen timing data in a buffer. The first value
206 * in a buffer will be calculated from the time_base field in the buffer head.
208 * Object or heap sizes are stored as uleb128.
209 * Pointer differences are stored as sleb128, instead.
211 * If an unexpected value is found, the rest of the buffer should be ignored,
212 * as generally the later values need the former to be interpreted correctly.
214 * buffer header format:
215 * [bufid: 4 bytes] constant value: BUF_ID
216 * [len: 4 bytes] size of the data following the buffer header
217 * [time_base: 8 bytes] time base in nanoseconds since an unspecified epoch
218 * [ptr_base: 8 bytes] base value for pointers
219 * [obj_base: 8 bytes] base value for object addresses
220 * [thread id: 8 bytes] system-specific thread ID (pthread_t for example)
221 * [method_base: 8 bytes] base value for MonoMethod pointers
224 * [extended info: upper 4 bits] [type: lower 4 bits]
225 * [time diff: uleb128] nanoseconds since last timing
227 * The data that follows depends on type and the extended info.
228 * Type is one of the enum values in mono-profiler-log.h: TYPE_ALLOC, TYPE_GC,
229 * TYPE_METADATA, TYPE_METHOD, TYPE_EXCEPTION, TYPE_MONITOR, TYPE_HEAP.
230 * The extended info bits are interpreted based on type, see
231 * each individual event description below.
232 * strings are represented as a 0-terminated utf8 sequence.
235 * [num: uleb128] number of frames following
236 * [frame: sleb128]* num MonoMethod* as a pointer difference from the last such
237 * pointer or the buffer method_base
241 * exinfo: flags: TYPE_ALLOC_BT
242 * [ptr: sleb128] class as a byte difference from ptr_base
243 * [obj: sleb128] object address as a byte difference from obj_base
244 * [size: uleb128] size of the object in the heap
245 * If the TYPE_ALLOC_BT flag is set, a backtrace follows.
249 * exinfo: one of TYPE_GC_EVENT, TYPE_GC_RESIZE, TYPE_GC_MOVE, TYPE_GC_HANDLE_CREATED[_BT],
250 * TYPE_GC_HANDLE_DESTROYED[_BT], TYPE_GC_FINALIZE_START, TYPE_GC_FINALIZE_END,
251 * TYPE_GC_FINALIZE_OBJECT_START, TYPE_GC_FINALIZE_OBJECT_END
252 * if exinfo == TYPE_GC_RESIZE
253 * [heap_size: uleb128] new heap size
254 * if exinfo == TYPE_GC_EVENT
255 * [event type: byte] GC event (MONO_GC_EVENT_* from profiler.h)
256 * [generation: byte] GC generation event refers to
257 * if exinfo == TYPE_GC_MOVE
258 * [num_objects: uleb128] number of object moves that follow
259 * [objaddr: sleb128]+ num_objects object pointer differences from obj_base
260 * num is always an even number: the even items are the old
261 * addresses, the odd numbers are the respective new object addresses
262 * if exinfo == TYPE_GC_HANDLE_CREATED[_BT]
263 * [handle_type: uleb128] GC handle type (System.Runtime.InteropServices.GCHandleType)
264 * upper bits reserved as flags
265 * [handle: uleb128] GC handle value
266 * [objaddr: sleb128] object pointer differences from obj_base
267 * If exinfo == TYPE_GC_HANDLE_CREATED_BT, a backtrace follows.
268 * if exinfo == TYPE_GC_HANDLE_DESTROYED[_BT]
269 * [handle_type: uleb128] GC handle type (System.Runtime.InteropServices.GCHandleType)
270 * upper bits reserved as flags
271 * [handle: uleb128] GC handle value
272 * If exinfo == TYPE_GC_HANDLE_DESTROYED_BT, a backtrace follows.
273 * if exinfo == TYPE_GC_FINALIZE_OBJECT_{START,END}
274 * [object: sleb128] the object as a difference from obj_base
276 * type metadata format:
277 * type: TYPE_METADATA
278 * exinfo: one of: TYPE_END_LOAD, TYPE_END_UNLOAD (optional for TYPE_THREAD and TYPE_DOMAIN)
279 * [mtype: byte] metadata type, one of: TYPE_CLASS, TYPE_IMAGE, TYPE_ASSEMBLY, TYPE_DOMAIN,
280 * TYPE_THREAD, TYPE_CONTEXT
281 * [pointer: sleb128] pointer of the metadata type depending on mtype
282 * if mtype == TYPE_CLASS
283 * [image: sleb128] MonoImage* as a pointer difference from ptr_base
284 * [name: string] full class name
285 * if mtype == TYPE_IMAGE
286 * [name: string] image file name
287 * if mtype == TYPE_ASSEMBLY
288 * [name: string] assembly name
289 * if mtype == TYPE_DOMAIN && exinfo == 0
290 * [name: string] domain friendly name
291 * if mtype == TYPE_CONTEXT
292 * [domain: sleb128] domain id as pointer
293 * if mtype == TYPE_THREAD && exinfo == 0
294 * [name: string] thread name
296 * type method format:
298 * exinfo: one of: TYPE_LEAVE, TYPE_ENTER, TYPE_EXC_LEAVE, TYPE_JIT
299 * [method: sleb128] MonoMethod* as a pointer difference from the last such
300 * pointer or the buffer method_base
301 * if exinfo == TYPE_JIT
302 * [code address: sleb128] pointer to the native code as a diff from ptr_base
303 * [code size: uleb128] size of the generated code
304 * [name: string] full method name
306 * type exception format:
307 * type: TYPE_EXCEPTION
308 * exinfo: TYPE_THROW_BT flag or one of: TYPE_CLAUSE
309 * if exinfo == TYPE_CLAUSE
310 * [clause type: byte] MonoExceptionEnum enum value
311 * [clause index: uleb128] index of the current clause
312 * [method: sleb128] MonoMethod* as a pointer difference from the last such
313 * pointer or the buffer method_base
315 * [object: sleb128] the exception object as a difference from obj_base
316 * if exinfo has TYPE_THROW_BT set, a backtrace follows.
318 * type runtime format:
320 * exinfo: one of: TYPE_JITHELPER
321 * if exinfo == TYPE_JITHELPER
322 * [type: byte] MonoProfilerCodeBufferType enum value
323 * [buffer address: sleb128] pointer to the native code as a diff from ptr_base
324 * [buffer size: uleb128] size of the generated code
325 * if type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE
326 * [name: string] buffer description name
328 * type monitor format:
330 * exinfo: TYPE_MONITOR_BT flag and one of: MONO_PROFILER_MONITOR_(CONTENTION|FAIL|DONE)
331 * [object: sleb128] the lock object as a difference from obj_base
332 * if exinfo.low3bits == MONO_PROFILER_MONITOR_CONTENTION
333 * If the TYPE_MONITOR_BT flag is set, a backtrace follows.
337 * exinfo: one of TYPE_HEAP_START, TYPE_HEAP_END, TYPE_HEAP_OBJECT, TYPE_HEAP_ROOT
338 * if exinfo == TYPE_HEAP_OBJECT
339 * [object: sleb128] the object as a difference from obj_base
340 * [class: sleb128] the object MonoClass* as a difference from ptr_base
341 * [size: uleb128] size of the object on the heap
342 * [num_refs: uleb128] number of object references
343 * each referenced objref is preceded by a uleb128 encoded offset: the
344 * first offset is from the object address and each next offset is relative
345 * to the previous one
346 * [objrefs: sleb128]+ object referenced as a difference from obj_base
347 * The same object can appear multiple times, but only the first time
348 * with size != 0: in the other cases this data will only be used to
349 * provide additional referenced objects.
350 * if exinfo == TYPE_HEAP_ROOT
351 * [num_roots: uleb128] number of root references
352 * [num_gc: uleb128] number of major gcs
353 * [object: sleb128] the object as a difference from obj_base
354 * [root_type: byte] the root_type: MonoProfileGCRootType (profiler.h)
355 * [extra_info: uleb128] the extra_info value
356 * object, root_type and extra_info are repeated num_roots times
360 * exinfo: one of TYPE_SAMPLE_HIT, TYPE_SAMPLE_USYM, TYPE_SAMPLE_UBIN, TYPE_SAMPLE_COUNTERS_DESC, TYPE_SAMPLE_COUNTERS
361 * if exinfo == TYPE_SAMPLE_HIT
362 * [thread: sleb128] thread id as difference from ptr_base
363 * [count: uleb128] number of following instruction addresses
364 * [ip: sleb128]* instruction pointer as difference from ptr_base
365 * [mbt_count: uleb128] number of managed backtrace frames
366 * [method: sleb128]* MonoMethod* as a pointer difference from the last such
367 * pointer or the buffer method_base (the first such method can also be identified by ip, but this is not necessarily true)
368 * if exinfo == TYPE_SAMPLE_USYM
369 * [address: sleb128] symbol address as a difference from ptr_base
370 * [size: uleb128] symbol size (may be 0 if unknown)
371 * [name: string] symbol name
372 * if exinfo == TYPE_SAMPLE_UBIN
373 * [address: sleb128] address where binary has been loaded
374 * [offset: uleb128] file offset of mapping (the same file can be mapped multiple times)
375 * [size: uleb128] memory size
376 * [name: string] binary name
377 * if exinfo == TYPE_SAMPLE_COUNTERS_DESC
378 * [len: uleb128] number of counters
380 * [section: uleb128] section of counter
381 * if section == MONO_COUNTER_PERFCOUNTERS:
382 * [section_name: string] section name of counter
383 * [name: string] name of counter
384 * [type: byte] type of counter
385 * [unit: byte] unit of counter
386 * [variance: byte] variance of counter
387 * [index: uleb128] unique index of counter
388 * if exinfo == TYPE_SAMPLE_COUNTERS
390 * [index: uleb128] unique index of counter
393 * [type: byte] type of counter value
396 * [0: uleb128] 0 -> value is null
398 * [1: uleb128] 1 -> value is not null
399 * [value: string] counter value
401 * [value: uleb128/sleb128/double] counter value, can be sleb128, uleb128 or double (determined by using type)
403 * type coverage format
404 * type: TYPE_COVERAGE
405 * exinfo: one of TYPE_COVERAGE_METHOD, TYPE_COVERAGE_STATEMENT, TYPE_COVERAGE_ASSEMBLY, TYPE_COVERAGE_CLASS
406 * if exinfo == TYPE_COVERAGE_METHOD
407 * [assembly: string] name of assembly
408 * [class: string] name of the class
409 * [name: string] name of the method
410 * [signature: string] the signature of the method
411 * [filename: string] the file path of the file that contains this method
412 * [token: uleb128] the method token
413 * [method_id: uleb128] an ID for this data to associate with the buffers of TYPE_COVERAGE_STATEMENTS
414 * [len: uleb128] the number of TYPE_COVERAGE_BUFFERS associated with this method
415 * if exinfo == TYPE_COVERAGE_STATEMENTS
416 * [method_id: uleb128] an the TYPE_COVERAGE_METHOD buffer to associate this with
417 * [offset: uleb128] the il offset relative to the previous offset
418 * [counter: uleb128] the counter for this instruction
419 * [line: uleb128] the line of filename containing this instruction
420 * [column: uleb128] the column containing this instruction
421 * if exinfo == TYPE_COVERAGE_ASSEMBLY
422 * [name: string] assembly name
423 * [guid: string] assembly GUID
424 * [filename: string] assembly filename
425 * [number_of_methods: uleb128] the number of methods in this assembly
426 * [fully_covered: uleb128] the number of fully covered methods
427 * [partially_covered: uleb128] the number of partially covered methods
428 * currently partially_covered will always be 0, and fully_covered is the
429 * number of methods that are fully and partially covered.
430 * if exinfo == TYPE_COVERAGE_CLASS
431 * [name: string] assembly name
432 * [class: string] class name
433 * [number_of_methods: uleb128] the number of methods in this class
434 * [fully_covered: uleb128] the number of fully covered methods
435 * [partially_covered: uleb128] the number of partially covered methods
436 * currently partially_covered will always be 0, and fully_covered is the
437 * number of methods that are fully and partially covered.
441 * exinfo: one of: TYPE_SYNC_POINT
442 * if exinfo == TYPE_SYNC_POINT
443 * [type: byte] MonoProfilerSyncPointType enum value
446 // Pending data to be written to the log, for a single thread.
447 // Threads periodically flush their own LogBuffers by calling safe_send
448 typedef struct _LogBuffer LogBuffer
;
450 // Next (older) LogBuffer in processing queue
456 uintptr_t method_base
;
457 uintptr_t last_method
;
461 // Bytes allocated for this LogBuffer
464 // Start of currently unused space in buffer
465 unsigned char* cursor
;
467 // Pointer to start-of-structure-plus-size (for convenience)
468 unsigned char* buf_end
;
470 // Start of data in buffer. Contents follow "buffer format" described above.
471 unsigned char buf
[1];
475 MonoLinkedListSetNode node
;
477 // Convenience pointer to the profiler structure.
478 MonoProfiler
*profiler
;
480 // Was this thread added to the LLS?
483 // The current log buffer for this thread.
486 // Methods referenced by events in `buffer`, see `MethodInfo`.
489 // Current call depth for enter/leave events.
492 // Indicates whether this thread is currently writing to its `buffer`.
495 // Has this thread written a thread end event to `buffer`?
497 } MonoProfilerThread
;
502 return (uintptr_t) mono_native_thread_id_get ();
509 return (uintptr_t) GetCurrentProcessId ();
511 return (uintptr_t) getpid ();
516 static mach_timebase_info_data_t timebase_info
;
517 #elif defined (HOST_WIN32)
518 static LARGE_INTEGER pcounter_freq
;
521 #define TICKS_PER_SEC 1000000000LL
527 uint64_t time
= mach_absolute_time ();
529 time
*= timebase_info
.numer
;
530 time
/= timebase_info
.denom
;
533 #elif defined (HOST_WIN32)
536 QueryPerformanceCounter (&value
);
538 return value
.QuadPart
* TICKS_PER_SEC
/ pcounter_freq
.QuadPart
;
539 #elif defined (CLOCK_MONOTONIC)
540 struct timespec tspec
;
542 clock_gettime (CLOCK_MONOTONIC
, &tspec
);
544 return ((uint64_t) tspec
.tv_sec
* TICKS_PER_SEC
+ tspec
.tv_nsec
);
548 gettimeofday (&tv
, NULL
);
550 return ((uint64_t) tv
.tv_sec
* TICKS_PER_SEC
+ tv
.tv_usec
* 1000);
554 static int timer_overhead
;
560 mach_timebase_info (&timebase_info
);
561 #elif defined (HOST_WIN32)
562 QueryPerformanceFrequency (&pcounter_freq
);
565 uint64_t time_start
= current_time ();
567 for (int i
= 0; i
< 256; ++i
)
570 uint64_t time_end
= current_time ();
572 timer_overhead
= (time_end
- time_start
) / 256;
576 * These macros should be used when writing an event to a log buffer. They take
577 * care of a bunch of stuff that can be repetitive and error-prone, such as
578 * acquiring/releasing the buffer lock, incrementing the event counter,
579 * expanding the log buffer, processing requests, etc. They also create a scope
580 * so that it's harder to leak the LogBuffer pointer, which can be problematic
581 * as the pointer is unstable when the buffer lock isn't acquired.
584 #define ENTER_LOG(COUNTER, BUFFER, SIZE) \
586 MonoProfilerThread *thread__ = PROF_TLS_GET (); \
587 if (thread__->attached) \
589 g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
590 thread__->busy = TRUE; \
591 InterlockedIncrement ((COUNTER)); \
592 LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))
594 #define EXIT_LOG_EXPLICIT(SEND, REQUESTS) \
595 thread__->busy = FALSE; \
597 send_log_unsafe (TRUE); \
598 if (thread__->attached) \
601 process_requests (); \
604 // Pass these to EXIT_LOG_EXPLICIT () for easier reading.
606 #define NO_SEND FALSE
607 #define DO_REQUESTS TRUE
608 #define NO_REQUESTS FALSE
610 #define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND, DO_REQUESTS)
612 static volatile gint32 buffer_rwlock_count
;
613 static volatile gpointer buffer_rwlock_exclusive
;
615 // Can be used recursively.
620 * If the thread holding the exclusive lock tries to modify the
621 * reader count, just make it a no-op. This way, we also avoid
622 * invoking the GC safe point macros below, which could break if
623 * done from a thread that is currently the initiator of STW.
625 * In other words, we rely on the fact that the GC thread takes
626 * the exclusive lock in the gc_event () callback when the world
629 if (InterlockedReadPointer (&buffer_rwlock_exclusive
) != (gpointer
) thread_id ()) {
632 while (InterlockedReadPointer (&buffer_rwlock_exclusive
))
633 mono_thread_info_yield ();
635 InterlockedIncrement (&buffer_rwlock_count
);
640 mono_memory_barrier ();
646 mono_memory_barrier ();
648 // See the comment in buffer_lock ().
649 if (InterlockedReadPointer (&buffer_rwlock_exclusive
) == (gpointer
) thread_id ())
652 g_assert (InterlockedRead (&buffer_rwlock_count
) && "Why are we trying to decrement a zero reader count?");
654 InterlockedDecrement (&buffer_rwlock_count
);
657 // Cannot be used recursively.
659 buffer_lock_excl (void)
661 gpointer tid
= (gpointer
) thread_id ();
663 g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive
) != tid
&& "Why are we taking the exclusive lock twice?");
667 while (InterlockedCompareExchangePointer (&buffer_rwlock_exclusive
, tid
, 0))
668 mono_thread_info_yield ();
670 while (InterlockedRead (&buffer_rwlock_count
))
671 mono_thread_info_yield ();
675 mono_memory_barrier ();
679 buffer_unlock_excl (void)
681 mono_memory_barrier ();
683 g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive
) && "Why is the exclusive lock not held?");
684 g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive
) == (gpointer
) thread_id () && "Why does another thread hold the exclusive lock?");
685 g_assert (!InterlockedRead (&buffer_rwlock_count
) && "Why are there readers when the exclusive lock is held?");
687 InterlockedWritePointer (&buffer_rwlock_exclusive
, NULL
);
690 typedef struct _BinaryObject BinaryObject
;
691 struct _BinaryObject
{
697 struct _MonoProfiler
{
699 #if defined (HAVE_SYS_ZLIB)
703 uint64_t startup_time
;
708 MonoNativeThreadId helper_thread
;
709 MonoNativeThreadId writer_thread
;
710 MonoNativeThreadId dumper_thread
;
711 volatile gint32 run_writer_thread
;
712 MonoLockFreeAllocSizeClass writer_entry_size_class
;
713 MonoLockFreeAllocator writer_entry_allocator
;
714 MonoLockFreeQueue writer_queue
;
715 MonoSemType writer_queue_sem
;
716 MonoConcurrentHashTable
*method_table
;
717 mono_mutex_t method_table_mutex
;
718 volatile gint32 run_dumper_thread
;
719 MonoLockFreeQueue dumper_queue
;
720 MonoSemType dumper_queue_sem
;
721 MonoLockFreeAllocSizeClass sample_size_class
;
722 MonoLockFreeAllocator sample_allocator
;
723 MonoLockFreeQueue sample_reuse_queue
;
724 BinaryObject
*binary_objects
;
725 GPtrArray
*coverage_filters
;
729 MonoLockFreeQueueNode node
;
734 #define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())
744 #define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
745 #define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
746 #define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
747 #define PROF_TLS_FREE() (TlsFree (profiler_tls))
749 static DWORD profiler_tls
;
753 #define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
754 #define PROF_TLS_GET() (profiler_tls)
755 #define PROF_TLS_INIT()
756 #define PROF_TLS_FREE()
758 static __thread MonoProfilerThread
*profiler_tls
;
762 #define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
763 #define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
764 #define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
765 #define PROF_TLS_FREE() (pthread_key_delete (profiler_tls))
767 static pthread_key_t profiler_tls
;
772 pstrdup (const char *s
)
774 int len
= strlen (s
) + 1;
775 char *p
= (char *) g_malloc (len
);
781 alloc_buffer (int size
)
783 return mono_valloc (NULL
, size
, MONO_MMAP_READ
| MONO_MMAP_WRITE
| MONO_MMAP_ANON
| MONO_MMAP_PRIVATE
, MONO_MEM_ACCOUNT_PROFILER
);
787 free_buffer (void *buf
, int size
)
789 mono_vfree (buf
, size
, MONO_MEM_ACCOUNT_PROFILER
);
793 create_buffer (uintptr_t tid
)
795 LogBuffer
* buf
= (LogBuffer
*) alloc_buffer (BUFFER_SIZE
);
797 InterlockedIncrement (&buffer_allocations_ctr
);
799 buf
->size
= BUFFER_SIZE
;
800 buf
->time_base
= current_time ();
801 buf
->last_time
= buf
->time_base
;
802 buf
->buf_end
= (unsigned char *) buf
+ buf
->size
;
803 buf
->cursor
= buf
->buf
;
804 buf
->thread_id
= tid
;
810 * Must be called with the reader lock held if thread is the current thread, or
811 * the exclusive lock if thread is a different thread. However, if thread is
812 * the current thread, and init_thread () was called with add_to_lls = FALSE,
813 * then no locking is necessary.
816 init_buffer_state (MonoProfilerThread
*thread
)
818 thread
->buffer
= create_buffer (thread
->node
.key
);
819 thread
->methods
= NULL
;
823 clear_hazard_pointers (MonoThreadHazardPointers
*hp
)
825 mono_hazard_pointer_clear (hp
, 0);
826 mono_hazard_pointer_clear (hp
, 1);
827 mono_hazard_pointer_clear (hp
, 2);
830 static MonoProfilerThread
*
831 init_thread (MonoProfiler
*prof
, gboolean add_to_lls
)
833 MonoProfilerThread
*thread
= PROF_TLS_GET ();
836 * Sometimes we may try to initialize a thread twice. One example is the
837 * main thread: We initialize it when setting up the profiler, but we will
838 * also get a thread_start () callback for it. Another example is when
839 * attaching new threads to the runtime: We may get a gc_alloc () callback
840 * for that thread's thread object (where we initialize it), soon followed
841 * by a thread_start () callback.
843 * These cases are harmless anyhow. Just return if we've already done the
844 * initialization work.
849 thread
= g_malloc (sizeof (MonoProfilerThread
));
850 thread
->node
.key
= thread_id ();
851 thread
->profiler
= prof
;
852 thread
->attached
= add_to_lls
;
853 thread
->call_depth
= 0;
855 thread
->ended
= FALSE
;
857 init_buffer_state (thread
);
860 * Some internal profiler threads don't need to be cleaned up
861 * by the main thread on shutdown.
864 MonoThreadHazardPointers
*hp
= mono_hazard_pointer_get ();
865 g_assert (mono_lls_insert (&profiler_thread_list
, hp
, &thread
->node
) && "Why can't we insert the thread in the LLS?");
866 clear_hazard_pointers (hp
);
869 PROF_TLS_SET (thread
);
874 // Only valid if init_thread () was called with add_to_lls = FALSE.
876 deinit_thread (MonoProfilerThread
*thread
)
878 g_assert (!thread
->attached
&& "Why are we manually freeing an attached thread?");
884 // Only valid if init_thread () was called with add_to_lls = FALSE.
886 ensure_logbuf_unsafe (MonoProfilerThread
*thread
, int bytes
)
888 LogBuffer
*old
= thread
->buffer
;
890 if (old
&& old
->cursor
+ bytes
+ 100 < old
->buf_end
)
893 LogBuffer
*new_
= create_buffer (thread
->node
.key
);
895 thread
->buffer
= new_
;
901 encode_uleb128 (uint64_t value
, uint8_t *buf
, uint8_t **endbuf
)
906 uint8_t b
= value
& 0x7f;
909 if (value
!= 0) /* more bytes to come */
919 encode_sleb128 (intptr_t value
, uint8_t *buf
, uint8_t **endbuf
)
922 int negative
= (value
< 0);
923 unsigned int size
= sizeof (intptr_t) * 8;
931 /* the following is unnecessary if the
932 * implementation of >>= uses an arithmetic rather
933 * than logical shift for a signed left operand
937 value
|= - ((intptr_t) 1 <<(size
- 7));
939 /* sign bit of byte is second high order bit (0x40) */
940 if ((value
== 0 && !(byte
& 0x40)) ||
941 (value
== -1 && (byte
& 0x40)))
953 emit_byte (LogBuffer
*logbuffer
, int value
)
955 logbuffer
->cursor
[0] = value
;
958 g_assert (logbuffer
->cursor
<= logbuffer
->buf_end
&& "Why are we writing past the buffer end?");
962 emit_value (LogBuffer
*logbuffer
, int value
)
964 encode_uleb128 (value
, logbuffer
->cursor
, &logbuffer
->cursor
);
966 g_assert (logbuffer
->cursor
<= logbuffer
->buf_end
&& "Why are we writing past the buffer end?");
970 emit_time (LogBuffer
*logbuffer
, uint64_t value
)
972 uint64_t tdiff
= value
- logbuffer
->last_time
;
973 encode_uleb128 (tdiff
, logbuffer
->cursor
, &logbuffer
->cursor
);
974 logbuffer
->last_time
= value
;
976 g_assert (logbuffer
->cursor
<= logbuffer
->buf_end
&& "Why are we writing past the buffer end?");
980 emit_event_time (LogBuffer
*logbuffer
, int event
, uint64_t time
)
982 emit_byte (logbuffer
, event
);
983 emit_time (logbuffer
, time
);
987 emit_event (LogBuffer
*logbuffer
, int event
)
989 emit_event_time (logbuffer
, event
, current_time ());
993 emit_svalue (LogBuffer
*logbuffer
, int64_t value
)
995 encode_sleb128 (value
, logbuffer
->cursor
, &logbuffer
->cursor
);
997 g_assert (logbuffer
->cursor
<= logbuffer
->buf_end
&& "Why are we writing past the buffer end?");
1001 emit_uvalue (LogBuffer
*logbuffer
, uint64_t value
)
1003 encode_uleb128 (value
, logbuffer
->cursor
, &logbuffer
->cursor
);
1005 g_assert (logbuffer
->cursor
<= logbuffer
->buf_end
&& "Why are we writing past the buffer end?");
1009 emit_ptr (LogBuffer
*logbuffer
, void *ptr
)
1011 if (!logbuffer
->ptr_base
)
1012 logbuffer
->ptr_base
= (uintptr_t) ptr
;
1014 emit_svalue (logbuffer
, (intptr_t) ptr
- logbuffer
->ptr_base
);
1016 g_assert (logbuffer
->cursor
<= logbuffer
->buf_end
&& "Why are we writing past the buffer end?");
1020 emit_method_inner (LogBuffer
*logbuffer
, void *method
)
1022 if (!logbuffer
->method_base
) {
1023 logbuffer
->method_base
= (intptr_t) method
;
1024 logbuffer
->last_method
= (intptr_t) method
;
1027 encode_sleb128 ((intptr_t) ((char *) method
- (char *) logbuffer
->last_method
), logbuffer
->cursor
, &logbuffer
->cursor
);
1028 logbuffer
->last_method
= (intptr_t) method
;
1030 g_assert (logbuffer
->cursor
<= logbuffer
->buf_end
&& "Why are we writing past the buffer end?");
1034 register_method_local (MonoMethod
*method
, MonoJitInfo
*ji
)
1036 MonoProfilerThread
*thread
= PROF_TLS_GET ();
1038 if (!mono_conc_hashtable_lookup (thread
->profiler
->method_table
, method
)) {
1039 MethodInfo
*info
= (MethodInfo
*) g_malloc (sizeof (MethodInfo
));
1041 info
->method
= method
;
1043 info
->time
= current_time ();
1045 GPtrArray
*arr
= thread
->methods
? thread
->methods
: (thread
->methods
= g_ptr_array_new ());
1046 g_ptr_array_add (arr
, info
);
1051 emit_method (LogBuffer
*logbuffer
, MonoMethod
*method
)
1053 register_method_local (method
, NULL
);
1054 emit_method_inner (logbuffer
, method
);
1058 emit_obj (LogBuffer
*logbuffer
, void *ptr
)
1060 if (!logbuffer
->obj_base
)
1061 logbuffer
->obj_base
= (uintptr_t) ptr
>> 3;
1063 emit_svalue (logbuffer
, ((uintptr_t) ptr
>> 3) - logbuffer
->obj_base
);
1065 g_assert (logbuffer
->cursor
<= logbuffer
->buf_end
&& "Why are we writing past the buffer end?");
1069 emit_string (LogBuffer
*logbuffer
, const char *str
, size_t size
)
1073 for (; i
< size
; i
++) {
1076 emit_byte (logbuffer
, str
[i
]);
1079 emit_byte (logbuffer
, '\0');
1083 emit_double (LogBuffer
*logbuffer
, double value
)
1086 unsigned char buffer
[8];
1087 memcpy (buffer
, &value
, 8);
1088 #if G_BYTE_ORDER == G_BIG_ENDIAN
1089 for (i
= 7; i
>= 0; i
--)
1091 for (i
= 0; i
< 8; i
++)
1093 emit_byte (logbuffer
, buffer
[i
]);
1097 write_int16 (char *buf
, int32_t value
)
1100 for (i
= 0; i
< 2; ++i
) {
1108 write_int32 (char *buf
, int32_t value
)
1111 for (i
= 0; i
< 4; ++i
) {
1119 write_int64 (char *buf
, int64_t value
)
1122 for (i
= 0; i
< 8; ++i
) {
1130 write_header_string (char *p
, const char *str
)
1132 size_t len
= strlen (str
) + 1;
1134 p
= write_int32 (p
, len
);
1141 dump_header (MonoProfiler
*profiler
)
1143 const char *args
= profiler
->args
;
1144 const char *arch
= mono_config_get_cpu ();
1145 const char *os
= mono_config_get_os ();
1147 char *hbuf
= g_malloc (
1148 sizeof (gint32
) /* header id */ +
1149 sizeof (gint8
) /* major version */ +
1150 sizeof (gint8
) /* minor version */ +
1151 sizeof (gint8
) /* data version */ +
1152 sizeof (gint8
) /* word size */ +
1153 sizeof (gint64
) /* startup time */ +
1154 sizeof (gint32
) /* timer overhead */ +
1155 sizeof (gint32
) /* flags */ +
1156 sizeof (gint32
) /* process id */ +
1157 sizeof (gint16
) /* command port */ +
1158 sizeof (gint32
) + strlen (args
) + 1 /* arguments */ +
1159 sizeof (gint32
) + strlen (arch
) + 1 /* architecture */ +
1160 sizeof (gint32
) + strlen (os
) + 1 /* operating system */
1164 p
= write_int32 (p
, LOG_HEADER_ID
);
1165 *p
++ = LOG_VERSION_MAJOR
;
1166 *p
++ = LOG_VERSION_MINOR
;
1167 *p
++ = LOG_DATA_VERSION
;
1168 *p
++ = sizeof (void *);
1169 p
= write_int64 (p
, ((uint64_t) time (NULL
)) * 1000);
1170 p
= write_int32 (p
, timer_overhead
);
1171 p
= write_int32 (p
, 0); /* flags */
1172 p
= write_int32 (p
, process_id ());
1173 p
= write_int16 (p
, profiler
->command_port
);
1174 p
= write_header_string (p
, args
);
1175 p
= write_header_string (p
, arch
);
1176 p
= write_header_string (p
, os
);
1178 #if defined (HAVE_SYS_ZLIB)
1179 if (profiler
->gzfile
) {
1180 gzwrite (profiler
->gzfile
, hbuf
, p
- hbuf
);
1184 fwrite (hbuf
, p
- hbuf
, 1, profiler
->file
);
1185 fflush (profiler
->file
);
1192 * Must be called with the reader lock held if thread is the current thread, or
1193 * the exclusive lock if thread is a different thread. However, if thread is
1194 * the current thread, and init_thread () was called with add_to_lls = FALSE,
1195 * then no locking is necessary.
1198 send_buffer (MonoProfilerThread
*thread
)
1200 WriterQueueEntry
*entry
= mono_lock_free_alloc (&thread
->profiler
->writer_entry_allocator
);
1201 entry
->methods
= thread
->methods
;
1202 entry
->buffer
= thread
->buffer
;
1204 mono_lock_free_queue_node_init (&entry
->node
, FALSE
);
1206 mono_lock_free_queue_enqueue (&thread
->profiler
->writer_queue
, &entry
->node
);
1207 mono_os_sem_post (&thread
->profiler
->writer_queue_sem
);
1211 free_thread (gpointer p
)
1213 MonoProfilerThread
*thread
= p
;
1215 if (!thread
->ended
) {
1217 * The thread is being cleaned up by the main thread during
1218 * shutdown. This typically happens for internal runtime
1219 * threads. We need to synthesize a thread end event.
1222 InterlockedIncrement (&thread_ends_ctr
);
1224 LogBuffer
*buf
= ensure_logbuf_unsafe (thread
,
1225 EVENT_SIZE
/* event */ +
1226 BYTE_SIZE
/* type */ +
1227 LEB128_SIZE
/* tid */
1230 emit_event (buf
, TYPE_END_UNLOAD
| TYPE_METADATA
);
1231 emit_byte (buf
, TYPE_THREAD
);
1232 emit_ptr (buf
, (void *) thread
->node
.key
);
1235 send_buffer (thread
);
1241 remove_thread (MonoProfilerThread
*thread
)
1243 MonoThreadHazardPointers
*hp
= mono_hazard_pointer_get ();
1245 if (mono_lls_remove (&profiler_thread_list
, hp
, &thread
->node
))
1246 mono_thread_hazardous_try_free (thread
, free_thread
);
1248 clear_hazard_pointers (hp
);
1252 dump_buffer (MonoProfiler
*profiler
, LogBuffer
*buf
)
1258 dump_buffer (profiler
, buf
->next
);
1260 if (buf
->cursor
- buf
->buf
) {
1261 p
= write_int32 (p
, BUF_ID
);
1262 p
= write_int32 (p
, buf
->cursor
- buf
->buf
);
1263 p
= write_int64 (p
, buf
->time_base
);
1264 p
= write_int64 (p
, buf
->ptr_base
);
1265 p
= write_int64 (p
, buf
->obj_base
);
1266 p
= write_int64 (p
, buf
->thread_id
);
1267 p
= write_int64 (p
, buf
->method_base
);
1269 #if defined (HAVE_SYS_ZLIB)
1270 if (profiler
->gzfile
) {
1271 gzwrite (profiler
->gzfile
, hbuf
, p
- hbuf
);
1272 gzwrite (profiler
->gzfile
, buf
->buf
, buf
->cursor
- buf
->buf
);
1276 fwrite (hbuf
, p
- hbuf
, 1, profiler
->file
);
1277 fwrite (buf
->buf
, buf
->cursor
- buf
->buf
, 1, profiler
->file
);
1278 fflush (profiler
->file
);
1282 free_buffer (buf
, buf
->size
);
1286 dump_buffer_threadless (MonoProfiler
*profiler
, LogBuffer
*buf
)
1288 for (LogBuffer
*iter
= buf
; iter
; iter
= iter
->next
)
1289 iter
->thread_id
= 0;
1291 dump_buffer (profiler
, buf
);
1295 process_requests (void)
1297 if (heapshot_requested
)
1298 mono_gc_collect (mono_gc_max_generation ());
1301 // Only valid if init_thread () was called with add_to_lls = FALSE.
1303 send_log_unsafe (gboolean if_needed
)
1305 MonoProfilerThread
*thread
= PROF_TLS_GET ();
1307 if (!if_needed
|| (if_needed
&& thread
->buffer
->next
)) {
1308 if (!thread
->attached
)
1309 for (LogBuffer
*iter
= thread
->buffer
; iter
; iter
= iter
->next
)
1310 iter
->thread_id
= 0;
1312 send_buffer (thread
);
1313 init_buffer_state (thread
);
1317 // Assumes that the exclusive lock is held.
1319 sync_point_flush (void)
1321 g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive
) == (gpointer
) thread_id () && "Why don't we hold the exclusive lock?");
1323 MONO_LLS_FOREACH_SAFE (&profiler_thread_list
, MonoProfilerThread
, thread
) {
1324 g_assert (thread
->attached
&& "Why is a thread in the LLS not attached?");
1326 send_buffer (thread
);
1327 init_buffer_state (thread
);
1328 } MONO_LLS_FOREACH_SAFE_END
1331 // Assumes that the exclusive lock is held.
1333 sync_point_mark (MonoProfilerSyncPointType type
)
1335 g_assert (InterlockedReadPointer (&buffer_rwlock_exclusive
) == (gpointer
) thread_id () && "Why don't we hold the exclusive lock?");
1337 ENTER_LOG (&sync_points_ctr
, logbuffer
,
1338 EVENT_SIZE
/* event */ +
1339 LEB128_SIZE
/* type */
1342 emit_event (logbuffer
, TYPE_META
| TYPE_SYNC_POINT
);
1343 emit_byte (logbuffer
, type
);
1345 EXIT_LOG_EXPLICIT (NO_SEND
, NO_REQUESTS
);
1347 send_log_unsafe (FALSE
);
1350 // Assumes that the exclusive lock is held.
1352 sync_point (MonoProfilerSyncPointType type
)
1354 sync_point_flush ();
1355 sync_point_mark (type
);
1359 gc_reference (MonoObject
*obj
, MonoClass
*klass
, uintptr_t size
, uintptr_t num
, MonoObject
**refs
, uintptr_t *offsets
, void *data
)
1361 /* account for object alignment in the heap */
1365 ENTER_LOG (&heap_objects_ctr
, logbuffer
,
1366 EVENT_SIZE
/* event */ +
1367 LEB128_SIZE
/* obj */ +
1368 LEB128_SIZE
/* klass */ +
1369 LEB128_SIZE
/* size */ +
1370 LEB128_SIZE
/* num */ +
1372 LEB128_SIZE
/* offset */ +
1373 LEB128_SIZE
/* ref */
1377 emit_event (logbuffer
, TYPE_HEAP_OBJECT
| TYPE_HEAP
);
1378 emit_obj (logbuffer
, obj
);
1379 emit_ptr (logbuffer
, klass
);
1380 emit_value (logbuffer
, size
);
1381 emit_value (logbuffer
, num
);
1383 uintptr_t last_offset
= 0;
1385 for (int i
= 0; i
< num
; ++i
) {
1386 emit_value (logbuffer
, offsets
[i
] - last_offset
);
1387 last_offset
= offsets
[i
];
1388 emit_obj (logbuffer
, refs
[i
]);
1391 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
1396 static unsigned int hs_mode_ms
= 0;
1397 static unsigned int hs_mode_gc
= 0;
1398 static unsigned int hs_mode_ondemand
= 0;
1399 static unsigned int gc_count
= 0;
1400 static uint64_t last_hs_time
= 0;
1401 static gboolean do_heap_walk
= FALSE
;
1404 heap_walk (MonoProfiler
*profiler
)
1406 ENTER_LOG (&heap_starts_ctr
, logbuffer
,
1407 EVENT_SIZE
/* event */
1410 emit_event (logbuffer
, TYPE_HEAP_START
| TYPE_HEAP
);
1412 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
1414 mono_gc_walk_heap (0, gc_reference
, NULL
);
1416 ENTER_LOG (&heap_ends_ctr
, logbuffer
,
1417 EVENT_SIZE
/* event */
1420 emit_event (logbuffer
, TYPE_HEAP_END
| TYPE_HEAP
);
1422 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
1426 gc_roots (MonoProfiler
*prof
, int num
, void **objects
, int *root_types
, uintptr_t *extra_info
)
1428 ENTER_LOG (&heap_roots_ctr
, logbuffer
,
1429 EVENT_SIZE
/* event */ +
1430 LEB128_SIZE
/* num */ +
1431 LEB128_SIZE
/* collections */ +
1433 LEB128_SIZE
/* object */ +
1434 LEB128_SIZE
/* root type */ +
1435 LEB128_SIZE
/* extra info */
1439 emit_event (logbuffer
, TYPE_HEAP_ROOT
| TYPE_HEAP
);
1440 emit_value (logbuffer
, num
);
1441 emit_value (logbuffer
, mono_gc_collection_count (mono_gc_max_generation ()));
1443 for (int i
= 0; i
< num
; ++i
) {
1444 emit_obj (logbuffer
, objects
[i
]);
1445 emit_byte (logbuffer
, root_types
[i
]);
1446 emit_value (logbuffer
, extra_info
[i
]);
1449 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
1453 gc_event (MonoProfiler
*profiler
, MonoGCEvent ev
, int generation
)
1455 ENTER_LOG (&gc_events_ctr
, logbuffer
,
1456 EVENT_SIZE
/* event */ +
1457 BYTE_SIZE
/* gc event */ +
1458 BYTE_SIZE
/* generation */
1461 emit_event (logbuffer
, TYPE_GC_EVENT
| TYPE_GC
);
1462 emit_byte (logbuffer
, ev
);
1463 emit_byte (logbuffer
, generation
);
1465 EXIT_LOG_EXPLICIT (NO_SEND
, NO_REQUESTS
);
1468 case MONO_GC_EVENT_START
:
1469 if (generation
== mono_gc_max_generation ())
1472 uint64_t now
= current_time ();
1474 if (hs_mode_ms
&& (now
- last_hs_time
) / 1000 * 1000 >= hs_mode_ms
)
1475 do_heap_walk
= TRUE
;
1476 else if (hs_mode_gc
&& !(gc_count
% hs_mode_gc
))
1477 do_heap_walk
= TRUE
;
1478 else if (hs_mode_ondemand
)
1479 do_heap_walk
= heapshot_requested
;
1480 else if (!hs_mode_ms
&& !hs_mode_gc
&& generation
== mono_gc_max_generation ())
1481 do_heap_walk
= TRUE
;
1483 case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED
:
1485 * Ensure that no thread can be in the middle of writing to
1486 * a buffer when the world stops...
1488 buffer_lock_excl ();
1490 case MONO_GC_EVENT_POST_STOP_WORLD
:
1492 * ... So that we now have a consistent view of all buffers.
1493 * This allows us to flush them. We need to do this because
1494 * they may contain object allocation events that need to be
1495 * committed to the log file before any object move events
1496 * that will be produced during this GC.
1498 sync_point (SYNC_POINT_WORLD_STOP
);
1500 case MONO_GC_EVENT_PRE_START_WORLD
:
1501 if (do_heap_shot
&& do_heap_walk
) {
1502 heap_walk (profiler
);
1504 do_heap_walk
= FALSE
;
1505 heapshot_requested
= 0;
1506 last_hs_time
= current_time ();
1509 case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED
:
1511 * Similarly, we must now make sure that any object moves
1512 * written to the GC thread's buffer are flushed. Otherwise,
1513 * object allocation events for certain addresses could come
1514 * after the move events that made those addresses available.
1516 sync_point_mark (SYNC_POINT_WORLD_START
);
1519 * Finally, it is safe to allow other threads to write to
1520 * their buffers again.
1522 buffer_unlock_excl ();
1530 gc_resize (MonoProfiler
*profiler
, int64_t new_size
)
1532 ENTER_LOG (&gc_resizes_ctr
, logbuffer
,
1533 EVENT_SIZE
/* event */ +
1534 LEB128_SIZE
/* new size */
1537 emit_event (logbuffer
, TYPE_GC_RESIZE
| TYPE_GC
);
1538 emit_value (logbuffer
, new_size
);
1540 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
1543 // If you alter MAX_FRAMES, you may need to alter SAMPLE_BLOCK_SIZE too.
1544 #define MAX_FRAMES 32
1548 MonoMethod
* methods
[MAX_FRAMES
];
1549 int32_t il_offsets
[MAX_FRAMES
];
1550 int32_t native_offsets
[MAX_FRAMES
];
1553 static int num_frames
= MAX_FRAMES
;
1556 walk_stack (MonoMethod
*method
, int32_t native_offset
, int32_t il_offset
, mono_bool managed
, void* data
)
1558 FrameData
*frame
= (FrameData
*)data
;
1559 if (method
&& frame
->count
< num_frames
) {
1560 frame
->il_offsets
[frame
->count
] = il_offset
;
1561 frame
->native_offsets
[frame
->count
] = native_offset
;
1562 frame
->methods
[frame
->count
++] = method
;
1563 //printf ("In %d %s at %d (native: %d)\n", frame->count, mono_method_get_name (method), il_offset, native_offset);
1565 return frame
->count
== num_frames
;
1569 * a note about stack walks: they can cause more profiler events to fire,
1570 * so we need to make sure they don't happen after we started emitting an
1571 * event, hence the collect_bt/emit_bt split.
1574 collect_bt (FrameData
*data
)
1577 mono_stack_walk_no_il (walk_stack
, data
);
1581 emit_bt (MonoProfiler
*prof
, LogBuffer
*logbuffer
, FrameData
*data
)
1583 /* FIXME: this is actually tons of data and we should
1584 * just output it the first time and use an id the next
1586 if (data
->count
> num_frames
)
1587 printf ("bad num frames: %d\n", data
->count
);
1588 emit_value (logbuffer
, data
->count
);
1589 //if (*p != data.count) {
1590 // printf ("bad num frames enc at %d: %d -> %d\n", count, data.count, *p); printf ("frames end: %p->%p\n", p, logbuffer->cursor); exit(0);}
1591 while (data
->count
) {
1592 emit_method (logbuffer
, data
->methods
[--data
->count
]);
1597 gc_alloc (MonoProfiler
*prof
, MonoObject
*obj
, MonoClass
*klass
)
1599 init_thread (prof
, TRUE
);
1601 int do_bt
= (nocalls
&& InterlockedRead (&runtime_inited
) && !notraces
) ? TYPE_ALLOC_BT
: 0;
1603 uintptr_t len
= mono_object_get_size (obj
);
1604 /* account for object alignment in the heap */
1611 ENTER_LOG (&gc_allocs_ctr
, logbuffer
,
1612 EVENT_SIZE
/* event */ +
1613 LEB128_SIZE
/* klass */ +
1614 LEB128_SIZE
/* obj */ +
1615 LEB128_SIZE
/* size */ +
1617 LEB128_SIZE
/* count */ +
1619 LEB128_SIZE
/* method */
1624 emit_event (logbuffer
, do_bt
| TYPE_ALLOC
);
1625 emit_ptr (logbuffer
, klass
);
1626 emit_obj (logbuffer
, obj
);
1627 emit_value (logbuffer
, len
);
1630 emit_bt (prof
, logbuffer
, &data
);
1636 gc_moves (MonoProfiler
*prof
, void **objects
, int num
)
1638 ENTER_LOG (&gc_moves_ctr
, logbuffer
,
1639 EVENT_SIZE
/* event */ +
1640 LEB128_SIZE
/* num */ +
1642 LEB128_SIZE
/* object */
1646 emit_event (logbuffer
, TYPE_GC_MOVE
| TYPE_GC
);
1647 emit_value (logbuffer
, num
);
1649 for (int i
= 0; i
< num
; ++i
)
1650 emit_obj (logbuffer
, objects
[i
]);
1652 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
1656 gc_handle (MonoProfiler
*prof
, int op
, int type
, uintptr_t handle
, MonoObject
*obj
)
1658 int do_bt
= nocalls
&& InterlockedRead (&runtime_inited
) && !notraces
;
1664 gint32
*ctr
= op
== MONO_PROFILER_GC_HANDLE_CREATED
? &gc_handle_creations_ctr
: &gc_handle_deletions_ctr
;
1666 ENTER_LOG (ctr
, logbuffer
,
1667 EVENT_SIZE
/* event */ +
1668 LEB128_SIZE
/* type */ +
1669 LEB128_SIZE
/* handle */ +
1670 (op
== MONO_PROFILER_GC_HANDLE_CREATED
? (
1671 LEB128_SIZE
/* obj */
1674 LEB128_SIZE
/* count */ +
1676 LEB128_SIZE
/* method */
1681 if (op
== MONO_PROFILER_GC_HANDLE_CREATED
)
1682 emit_event (logbuffer
, (do_bt
? TYPE_GC_HANDLE_CREATED_BT
: TYPE_GC_HANDLE_CREATED
) | TYPE_GC
);
1683 else if (op
== MONO_PROFILER_GC_HANDLE_DESTROYED
)
1684 emit_event (logbuffer
, (do_bt
? TYPE_GC_HANDLE_DESTROYED_BT
: TYPE_GC_HANDLE_DESTROYED
) | TYPE_GC
);
1686 g_assert_not_reached ();
1688 emit_value (logbuffer
, type
);
1689 emit_value (logbuffer
, handle
);
1691 if (op
== MONO_PROFILER_GC_HANDLE_CREATED
)
1692 emit_obj (logbuffer
, obj
);
1695 emit_bt (prof
, logbuffer
, &data
);
1701 finalize_begin (MonoProfiler
*prof
)
1703 ENTER_LOG (&finalize_begins_ctr
, buf
,
1704 EVENT_SIZE
/* event */
1707 emit_event (buf
, TYPE_GC_FINALIZE_START
| TYPE_GC
);
1713 finalize_end (MonoProfiler
*prof
)
1715 ENTER_LOG (&finalize_ends_ctr
, buf
,
1716 EVENT_SIZE
/* event */
1719 emit_event (buf
, TYPE_GC_FINALIZE_END
| TYPE_GC
);
1725 finalize_object_begin (MonoProfiler
*prof
, MonoObject
*obj
)
1727 ENTER_LOG (&finalize_object_begins_ctr
, buf
,
1728 EVENT_SIZE
/* event */ +
1729 LEB128_SIZE
/* obj */
1732 emit_event (buf
, TYPE_GC_FINALIZE_OBJECT_START
| TYPE_GC
);
1733 emit_obj (buf
, obj
);
1739 finalize_object_end (MonoProfiler
*prof
, MonoObject
*obj
)
1741 ENTER_LOG (&finalize_object_ends_ctr
, buf
,
1742 EVENT_SIZE
/* event */ +
1743 LEB128_SIZE
/* obj */
1746 emit_event (buf
, TYPE_GC_FINALIZE_OBJECT_END
| TYPE_GC
);
1747 emit_obj (buf
, obj
);
1753 push_nesting (char *p
, MonoClass
*klass
)
1758 nesting
= mono_class_get_nesting_type (klass
);
1760 p
= push_nesting (p
, nesting
);
1764 name
= mono_class_get_name (klass
);
1765 nspace
= mono_class_get_namespace (klass
);
1768 p
+= strlen (nspace
);
1778 type_name (MonoClass
*klass
)
1782 push_nesting (buf
, klass
);
1783 p
= (char *) g_malloc (strlen (buf
) + 1);
1789 image_loaded (MonoProfiler
*prof
, MonoImage
*image
, int result
)
1791 if (result
!= MONO_PROFILE_OK
)
1794 const char *name
= mono_image_get_filename (image
);
1795 int nlen
= strlen (name
) + 1;
1797 ENTER_LOG (&image_loads_ctr
, logbuffer
,
1798 EVENT_SIZE
/* event */ +
1799 BYTE_SIZE
/* type */ +
1800 LEB128_SIZE
/* image */ +
1804 emit_event (logbuffer
, TYPE_END_LOAD
| TYPE_METADATA
);
1805 emit_byte (logbuffer
, TYPE_IMAGE
);
1806 emit_ptr (logbuffer
, image
);
1807 memcpy (logbuffer
->cursor
, name
, nlen
);
1808 logbuffer
->cursor
+= nlen
;
1814 image_unloaded (MonoProfiler
*prof
, MonoImage
*image
)
1816 const char *name
= mono_image_get_filename (image
);
1817 int nlen
= strlen (name
) + 1;
1819 ENTER_LOG (&image_unloads_ctr
, logbuffer
,
1820 EVENT_SIZE
/* event */ +
1821 BYTE_SIZE
/* type */ +
1822 LEB128_SIZE
/* image */ +
1826 emit_event (logbuffer
, TYPE_END_UNLOAD
| TYPE_METADATA
);
1827 emit_byte (logbuffer
, TYPE_IMAGE
);
1828 emit_ptr (logbuffer
, image
);
1829 memcpy (logbuffer
->cursor
, name
, nlen
);
1830 logbuffer
->cursor
+= nlen
;
1836 assembly_loaded (MonoProfiler
*prof
, MonoAssembly
*assembly
, int result
)
1838 if (result
!= MONO_PROFILE_OK
)
1841 char *name
= mono_stringify_assembly_name (mono_assembly_get_name (assembly
));
1842 int nlen
= strlen (name
) + 1;
1844 ENTER_LOG (&assembly_loads_ctr
, logbuffer
,
1845 EVENT_SIZE
/* event */ +
1846 BYTE_SIZE
/* type */ +
1847 LEB128_SIZE
/* assembly */ +
1851 emit_event (logbuffer
, TYPE_END_LOAD
| TYPE_METADATA
);
1852 emit_byte (logbuffer
, TYPE_ASSEMBLY
);
1853 emit_ptr (logbuffer
, assembly
);
1854 memcpy (logbuffer
->cursor
, name
, nlen
);
1855 logbuffer
->cursor
+= nlen
;
1863 assembly_unloaded (MonoProfiler
*prof
, MonoAssembly
*assembly
)
1865 char *name
= mono_stringify_assembly_name (mono_assembly_get_name (assembly
));
1866 int nlen
= strlen (name
) + 1;
1868 ENTER_LOG (&assembly_unloads_ctr
, logbuffer
,
1869 EVENT_SIZE
/* event */ +
1870 BYTE_SIZE
/* type */ +
1871 LEB128_SIZE
/* assembly */ +
1875 emit_event (logbuffer
, TYPE_END_UNLOAD
| TYPE_METADATA
);
1876 emit_byte (logbuffer
, TYPE_ASSEMBLY
);
1877 emit_ptr (logbuffer
, assembly
);
1878 memcpy (logbuffer
->cursor
, name
, nlen
);
1879 logbuffer
->cursor
+= nlen
;
1887 class_loaded (MonoProfiler
*prof
, MonoClass
*klass
, int result
)
1889 if (result
!= MONO_PROFILE_OK
)
1894 if (InterlockedRead (&runtime_inited
))
1895 name
= mono_type_get_name (mono_class_get_type (klass
));
1897 name
= type_name (klass
);
1899 int nlen
= strlen (name
) + 1;
1900 MonoImage
*image
= mono_class_get_image (klass
);
1902 ENTER_LOG (&class_loads_ctr
, logbuffer
,
1903 EVENT_SIZE
/* event */ +
1904 BYTE_SIZE
/* type */ +
1905 LEB128_SIZE
/* klass */ +
1906 LEB128_SIZE
/* image */ +
1910 emit_event (logbuffer
, TYPE_END_LOAD
| TYPE_METADATA
);
1911 emit_byte (logbuffer
, TYPE_CLASS
);
1912 emit_ptr (logbuffer
, klass
);
1913 emit_ptr (logbuffer
, image
);
1914 memcpy (logbuffer
->cursor
, name
, nlen
);
1915 logbuffer
->cursor
+= nlen
;
1926 class_unloaded (MonoProfiler
*prof
, MonoClass
*klass
)
1930 if (InterlockedRead (&runtime_inited
))
1931 name
= mono_type_get_name (mono_class_get_type (klass
));
1933 name
= type_name (klass
);
1935 int nlen
= strlen (name
) + 1;
1936 MonoImage
*image
= mono_class_get_image (klass
);
1938 ENTER_LOG (&class_unloads_ctr
, logbuffer
,
1939 EVENT_SIZE
/* event */ +
1940 BYTE_SIZE
/* type */ +
1941 LEB128_SIZE
/* klass */ +
1942 LEB128_SIZE
/* image */ +
1946 emit_event (logbuffer
, TYPE_END_UNLOAD
| TYPE_METADATA
);
1947 emit_byte (logbuffer
, TYPE_CLASS
);
1948 emit_ptr (logbuffer
, klass
);
1949 emit_ptr (logbuffer
, image
);
1950 memcpy (logbuffer
->cursor
, name
, nlen
);
1951 logbuffer
->cursor
+= nlen
;
1961 static void process_method_enter_coverage (MonoProfiler
*prof
, MonoMethod
*method
);
1964 method_enter (MonoProfiler
*prof
, MonoMethod
*method
)
1966 process_method_enter_coverage (prof
, method
);
1968 if (!only_coverage
&& PROF_TLS_GET ()->call_depth
++ <= max_call_depth
) {
1969 ENTER_LOG (&method_entries_ctr
, logbuffer
,
1970 EVENT_SIZE
/* event */ +
1971 LEB128_SIZE
/* method */
1974 emit_event (logbuffer
, TYPE_ENTER
| TYPE_METHOD
);
1975 emit_method (logbuffer
, method
);
1982 method_leave (MonoProfiler
*prof
, MonoMethod
*method
)
1984 if (!only_coverage
&& --PROF_TLS_GET ()->call_depth
<= max_call_depth
) {
1985 ENTER_LOG (&method_exits_ctr
, logbuffer
,
1986 EVENT_SIZE
/* event */ +
1987 LEB128_SIZE
/* method */
1990 emit_event (logbuffer
, TYPE_LEAVE
| TYPE_METHOD
);
1991 emit_method (logbuffer
, method
);
1998 method_exc_leave (MonoProfiler
*prof
, MonoMethod
*method
)
2000 if (!only_coverage
&& !nocalls
&& --PROF_TLS_GET ()->call_depth
<= max_call_depth
) {
2001 ENTER_LOG (&method_exception_exits_ctr
, logbuffer
,
2002 EVENT_SIZE
/* event */ +
2003 LEB128_SIZE
/* method */
2006 emit_event (logbuffer
, TYPE_EXC_LEAVE
| TYPE_METHOD
);
2007 emit_method (logbuffer
, method
);
2014 method_jitted (MonoProfiler
*prof
, MonoMethod
*method
, MonoJitInfo
*ji
, int result
)
2016 if (result
!= MONO_PROFILE_OK
)
2019 register_method_local (method
, ji
);
2021 process_requests ();
2025 code_buffer_new (MonoProfiler
*prof
, void *buffer
, int size
, MonoProfilerCodeBufferType type
, void *data
)
2030 if (type
== MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE
) {
2031 name
= (char *) data
;
2032 nlen
= strlen (name
) + 1;
2038 ENTER_LOG (&code_buffers_ctr
, logbuffer
,
2039 EVENT_SIZE
/* event */ +
2040 BYTE_SIZE
/* type */ +
2041 LEB128_SIZE
/* buffer */ +
2042 LEB128_SIZE
/* size */ +
2048 emit_event (logbuffer
, TYPE_JITHELPER
| TYPE_RUNTIME
);
2049 emit_byte (logbuffer
, type
);
2050 emit_ptr (logbuffer
, buffer
);
2051 emit_value (logbuffer
, size
);
2054 memcpy (logbuffer
->cursor
, name
, nlen
);
2055 logbuffer
->cursor
+= nlen
;
2062 throw_exc (MonoProfiler
*prof
, MonoObject
*object
)
2064 int do_bt
= (nocalls
&& InterlockedRead (&runtime_inited
) && !notraces
) ? TYPE_THROW_BT
: 0;
2070 ENTER_LOG (&exception_throws_ctr
, logbuffer
,
2071 EVENT_SIZE
/* event */ +
2072 LEB128_SIZE
/* object */ +
2074 LEB128_SIZE
/* count */ +
2076 LEB128_SIZE
/* method */
2081 emit_event (logbuffer
, do_bt
| TYPE_EXCEPTION
);
2082 emit_obj (logbuffer
, object
);
2085 emit_bt (prof
, logbuffer
, &data
);
2091 clause_exc (MonoProfiler
*prof
, MonoMethod
*method
, int clause_type
, int clause_num
)
2093 ENTER_LOG (&exception_clauses_ctr
, logbuffer
,
2094 EVENT_SIZE
/* event */ +
2095 BYTE_SIZE
/* clause type */ +
2096 LEB128_SIZE
/* clause num */ +
2097 LEB128_SIZE
/* method */
2100 emit_event (logbuffer
, TYPE_EXCEPTION
| TYPE_CLAUSE
);
2101 emit_byte (logbuffer
, clause_type
);
2102 emit_value (logbuffer
, clause_num
);
2103 emit_method (logbuffer
, method
);
2109 monitor_event (MonoProfiler
*profiler
, MonoObject
*object
, MonoProfilerMonitorEvent event
)
2111 int do_bt
= (nocalls
&& InterlockedRead (&runtime_inited
) && !notraces
&& event
== MONO_PROFILER_MONITOR_CONTENTION
) ? TYPE_MONITOR_BT
: 0;
2120 case MONO_PROFILER_MONITOR_CONTENTION
:
2121 ctr
= &monitor_contentions_ctr
;
2123 case MONO_PROFILER_MONITOR_DONE
:
2124 ctr
= &monitor_acquisitions_ctr
;
2126 case MONO_PROFILER_MONITOR_FAIL
:
2127 ctr
= &monitor_failures_ctr
;
2130 g_assert_not_reached ();
2134 ENTER_LOG (ctr
, logbuffer
,
2135 EVENT_SIZE
/* event */ +
2136 LEB128_SIZE
/* object */ +
2138 LEB128_SIZE
/* count */ +
2140 LEB128_SIZE
/* method */
2145 emit_event (logbuffer
, (event
<< 4) | do_bt
| TYPE_MONITOR
);
2146 emit_obj (logbuffer
, object
);
2149 emit_bt (profiler
, logbuffer
, &data
);
2155 thread_start (MonoProfiler
*prof
, uintptr_t tid
)
2157 init_thread (prof
, TRUE
);
2159 ENTER_LOG (&thread_starts_ctr
, logbuffer
,
2160 EVENT_SIZE
/* event */ +
2161 BYTE_SIZE
/* type */ +
2162 LEB128_SIZE
/* tid */
2165 emit_event (logbuffer
, TYPE_END_LOAD
| TYPE_METADATA
);
2166 emit_byte (logbuffer
, TYPE_THREAD
);
2167 emit_ptr (logbuffer
, (void*) tid
);
2173 thread_end (MonoProfiler
*prof
, uintptr_t tid
)
2175 ENTER_LOG (&thread_ends_ctr
, logbuffer
,
2176 EVENT_SIZE
/* event */ +
2177 BYTE_SIZE
/* type */ +
2178 LEB128_SIZE
/* tid */
2181 emit_event (logbuffer
, TYPE_END_UNLOAD
| TYPE_METADATA
);
2182 emit_byte (logbuffer
, TYPE_THREAD
);
2183 emit_ptr (logbuffer
, (void*) tid
);
2185 EXIT_LOG_EXPLICIT (NO_SEND
, NO_REQUESTS
);
2187 MonoProfilerThread
*thread
= PROF_TLS_GET ();
2189 thread
->ended
= TRUE
;
2190 remove_thread (thread
);
2192 PROF_TLS_SET (NULL
);
2196 thread_name (MonoProfiler
*prof
, uintptr_t tid
, const char *name
)
2198 int len
= strlen (name
) + 1;
2200 ENTER_LOG (&thread_names_ctr
, logbuffer
,
2201 EVENT_SIZE
/* event */ +
2202 BYTE_SIZE
/* type */ +
2203 LEB128_SIZE
/* tid */ +
2207 emit_event (logbuffer
, TYPE_METADATA
);
2208 emit_byte (logbuffer
, TYPE_THREAD
);
2209 emit_ptr (logbuffer
, (void*)tid
);
2210 memcpy (logbuffer
->cursor
, name
, len
);
2211 logbuffer
->cursor
+= len
;
2217 domain_loaded (MonoProfiler
*prof
, MonoDomain
*domain
, int result
)
2219 if (result
!= MONO_PROFILE_OK
)
2222 ENTER_LOG (&domain_loads_ctr
, logbuffer
,
2223 EVENT_SIZE
/* event */ +
2224 BYTE_SIZE
/* type */ +
2225 LEB128_SIZE
/* domain id */
2228 emit_event (logbuffer
, TYPE_END_LOAD
| TYPE_METADATA
);
2229 emit_byte (logbuffer
, TYPE_DOMAIN
);
2230 emit_ptr (logbuffer
, (void*)(uintptr_t) mono_domain_get_id (domain
));
2236 domain_unloaded (MonoProfiler
*prof
, MonoDomain
*domain
)
2238 ENTER_LOG (&domain_unloads_ctr
, logbuffer
,
2239 EVENT_SIZE
/* event */ +
2240 BYTE_SIZE
/* type */ +
2241 LEB128_SIZE
/* domain id */
2244 emit_event (logbuffer
, TYPE_END_UNLOAD
| TYPE_METADATA
);
2245 emit_byte (logbuffer
, TYPE_DOMAIN
);
2246 emit_ptr (logbuffer
, (void*)(uintptr_t) mono_domain_get_id (domain
));
2252 domain_name (MonoProfiler
*prof
, MonoDomain
*domain
, const char *name
)
2254 int nlen
= strlen (name
) + 1;
2256 ENTER_LOG (&domain_names_ctr
, logbuffer
,
2257 EVENT_SIZE
/* event */ +
2258 BYTE_SIZE
/* type */ +
2259 LEB128_SIZE
/* domain id */ +
2263 emit_event (logbuffer
, TYPE_METADATA
);
2264 emit_byte (logbuffer
, TYPE_DOMAIN
);
2265 emit_ptr (logbuffer
, (void*)(uintptr_t) mono_domain_get_id (domain
));
2266 memcpy (logbuffer
->cursor
, name
, nlen
);
2267 logbuffer
->cursor
+= nlen
;
2273 context_loaded (MonoProfiler
*prof
, MonoAppContext
*context
)
2275 ENTER_LOG (&context_loads_ctr
, logbuffer
,
2276 EVENT_SIZE
/* event */ +
2277 BYTE_SIZE
/* type */ +
2278 LEB128_SIZE
/* context id */ +
2279 LEB128_SIZE
/* domain id */
2282 emit_event (logbuffer
, TYPE_END_LOAD
| TYPE_METADATA
);
2283 emit_byte (logbuffer
, TYPE_CONTEXT
);
2284 emit_ptr (logbuffer
, (void*)(uintptr_t) mono_context_get_id (context
));
2285 emit_ptr (logbuffer
, (void*)(uintptr_t) mono_context_get_domain_id (context
));
2291 context_unloaded (MonoProfiler
*prof
, MonoAppContext
*context
)
2293 ENTER_LOG (&context_unloads_ctr
, logbuffer
,
2294 EVENT_SIZE
/* event */ +
2295 BYTE_SIZE
/* type */ +
2296 LEB128_SIZE
/* context id */ +
2297 LEB128_SIZE
/* domain id */
2300 emit_event (logbuffer
, TYPE_END_UNLOAD
| TYPE_METADATA
);
2301 emit_byte (logbuffer
, TYPE_CONTEXT
);
2302 emit_ptr (logbuffer
, (void*)(uintptr_t) mono_context_get_id (context
));
2303 emit_ptr (logbuffer
, (void*)(uintptr_t) mono_context_get_domain_id (context
));
2316 MonoLockFreeQueueNode node
;
2322 AsyncFrameInfo frames
[MONO_ZERO_LEN_ARRAY
];
2326 async_walk_stack (MonoMethod
*method
, MonoDomain
*domain
, void *base_address
, int offset
, void *data
)
2328 SampleHit
*sample
= (SampleHit
*) data
;
2330 if (sample
->count
< num_frames
) {
2331 int i
= sample
->count
;
2333 sample
->frames
[i
].method
= method
;
2334 sample
->frames
[i
].domain
= domain
;
2335 sample
->frames
[i
].base_address
= base_address
;
2336 sample
->frames
[i
].offset
= offset
;
2341 return sample
->count
== num_frames
;
2344 #define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
2345 #define SAMPLE_BLOCK_SIZE (mono_pagesize ())
2348 enqueue_sample_hit (gpointer p
)
2350 SampleHit
*sample
= p
;
2352 mono_lock_free_queue_node_unpoison (&sample
->node
);
2353 mono_lock_free_queue_enqueue (&sample
->prof
->dumper_queue
, &sample
->node
);
2354 mono_os_sem_post (&sample
->prof
->dumper_queue_sem
);
2358 mono_sample_hit (MonoProfiler
*profiler
, unsigned char *ip
, void *context
)
2361 * Please note: We rely on the runtime loading the profiler with
2362 * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
2363 * this function (and its siblings) are resolved when the profiler is
2364 * loaded. Otherwise, we would potentially invoke the dynamic linker when
2365 * invoking runtime functions, which is not async-signal-safe.
2368 if (InterlockedRead (&in_shutdown
))
2371 SampleHit
*sample
= (SampleHit
*) mono_lock_free_queue_dequeue (&profiler
->sample_reuse_queue
);
2375 * If we're out of reusable sample events and we're not allowed to
2376 * allocate more, we have no choice but to drop the event.
2378 if (InterlockedRead (&sample_allocations_ctr
) >= max_allocated_sample_hits
)
2381 sample
= mono_lock_free_alloc (&profiler
->sample_allocator
);
2382 sample
->prof
= profiler
;
2383 mono_lock_free_queue_node_init (&sample
->node
, TRUE
);
2385 InterlockedIncrement (&sample_allocations_ctr
);
2389 mono_stack_walk_async_safe (&async_walk_stack
, context
, sample
);
2391 sample
->time
= current_time ();
2392 sample
->tid
= thread_id ();
2395 mono_thread_hazardous_try_free (sample
, enqueue_sample_hit
);
/* Open-addressing hash of code pages seen by the statistical profiler. */
static uintptr_t *code_pages = 0;
static int num_code_pages = 0;
static int size_code_pages = 0;
#define CPAGE_SHIFT (9)
#define CPAGE_SIZE (1 << CPAGE_SHIFT)
#define CPAGE_MASK (~(CPAGE_SIZE - 1))
#define CPAGE_ADDR(p) ((p) & CPAGE_MASK)

/* Insert page into the hash with linear probing; return 1 if newly added. */
static int
add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
{
	uintptr_t i;
	uintptr_t start_pos;

	start_pos = (page >> CPAGE_SHIFT) % hsize;
	i = start_pos;
	do {
		if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
			return 0; /* already present */
		} else if (!hash [i]) {
			hash [i] = page;
			return 1;
		}
		/* wrap around */
		if (++i == hsize)
			i = 0;
	} while (i != start_pos);
	/* should not happen */
	printf ("failed code page store\n");
	return 0;
}

/* Record the page containing ip, growing the hash at 50% load. */
static void
add_code_pointer (uintptr_t ip)
{
	uintptr_t i;

	if (num_code_pages * 2 >= size_code_pages) {
		uintptr_t *n;
		uintptr_t old_size = size_code_pages;

		size_code_pages *= 2;
		if (size_code_pages == 0)
			size_code_pages = 16;

		n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
		for (i = 0; i < old_size; ++i) {
			if (code_pages [i])
				add_code_page (n, size_code_pages, code_pages [i]);
		}
		if (code_pages)
			g_free (code_pages);
		code_pages = n;
	}

	num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
}
2451 /* ELF code crashes on some systems. */
2452 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2455 dump_ubin (MonoProfiler
*prof
, const char *filename
, uintptr_t load_addr
, uint64_t offset
, uintptr_t size
)
2457 int len
= strlen (filename
) + 1;
2459 ENTER_LOG (&sample_ubins_ctr
, logbuffer
,
2460 EVENT_SIZE
/* event */ +
2461 LEB128_SIZE
/* load address */ +
2462 LEB128_SIZE
/* offset */ +
2463 LEB128_SIZE
/* size */ +
2464 nlen
/* file name */
2467 emit_event (logbuffer
, TYPE_SAMPLE
| TYPE_SAMPLE_UBIN
);
2468 emit_svalue (logbuffer
, load_addr
);
2469 emit_uvalue (logbuffer
, offset
);
2470 emit_uvalue (logbuffer
, size
);
2471 memcpy (logbuffer
->cursor
, filename
, len
);
2472 logbuffer
->cursor
+= len
;
2474 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
2479 dump_usym (MonoProfiler
*prof
, const char *name
, uintptr_t value
, uintptr_t size
)
2481 int len
= strlen (name
) + 1;
2483 ENTER_LOG (&sample_usyms_ctr
, logbuffer
,
2484 EVENT_SIZE
/* event */ +
2485 LEB128_SIZE
/* value */ +
2486 LEB128_SIZE
/* size */ +
2490 emit_event (logbuffer
, TYPE_SAMPLE
| TYPE_SAMPLE_USYM
);
2491 emit_ptr (logbuffer
, (void*)value
);
2492 emit_value (logbuffer
, size
);
2493 memcpy (logbuffer
->cursor
, name
, len
);
2494 logbuffer
->cursor
+= len
;
2496 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
2499 /* ELF code crashes on some systems. */
2500 //#if defined(ELFMAG0)
2503 #if SIZEOF_VOID_P == 4
2504 #define ELF_WSIZE 32
2506 #define ELF_WSIZE 64
2509 #define ElfW(type) _ElfW (Elf, ELF_WSIZE, type)
2510 #define _ElfW(e,w,t) _ElfW_1 (e, w, _##t)
2511 #define _ElfW_1(e,w,t) e##w##t
2515 dump_elf_symbols (MonoProfiler
*prof
, ElfW(Sym
) *symbols
, int num_symbols
, const char *strtab
, void *load_addr
)
2518 for (i
= 0; i
< num_symbols
; ++i
) {
2520 sym
= strtab
+ symbols
[i
].st_name
;
2521 if (!symbols
[i
].st_name
|| !symbols
[i
].st_size
|| (symbols
[i
].st_info
& 0xf) != STT_FUNC
)
2523 //printf ("symbol %s at %d\n", sym, symbols [i].st_value);
2524 dump_usym (sym
, (uintptr_t)load_addr
+ symbols
[i
].st_value
, symbols
[i
].st_size
);
2529 read_elf_symbols (MonoProfiler
*prof
, const char *filename
, void *load_addr
)
2536 ElfW(Shdr
) *sheader
;
2537 ElfW(Shdr
) *shstrtabh
;
2538 ElfW(Shdr
) *symtabh
= NULL
;
2539 ElfW(Shdr
) *strtabh
= NULL
;
2540 ElfW(Sym
) *symbols
= NULL
;
2544 fd
= open (filename
, O_RDONLY
);
2547 if (fstat (fd
, &statb
) != 0) {
2551 file_size
= statb
.st_size
;
2552 data
= mmap (NULL
, file_size
, PROT_READ
, MAP_PRIVATE
, fd
, 0);
2554 if (data
== MAP_FAILED
)
2557 if (header
->e_ident
[EI_MAG0
] != ELFMAG0
||
2558 header
->e_ident
[EI_MAG1
] != ELFMAG1
||
2559 header
->e_ident
[EI_MAG2
] != ELFMAG2
||
2560 header
->e_ident
[EI_MAG3
] != ELFMAG3
) {
2561 munmap (data
, file_size
);
2564 sheader
= (void*)((char*)data
+ header
->e_shoff
);
2565 shstrtabh
= (void*)((char*)sheader
+ (header
->e_shentsize
* header
->e_shstrndx
));
2566 strtab
= (const char*)data
+ shstrtabh
->sh_offset
;
2567 for (i
= 0; i
< header
->e_shnum
; ++i
) {
2568 //printf ("section header: %d\n", sheader->sh_type);
2569 if (sheader
->sh_type
== SHT_SYMTAB
) {
2571 strtabh
= (void*)((char*)data
+ header
->e_shoff
+ sheader
->sh_link
* header
->e_shentsize
);
2572 /*printf ("symtab section header: %d, .strstr: %d\n", i, sheader->sh_link);*/
2575 sheader
= (void*)((char*)sheader
+ header
->e_shentsize
);
2577 if (!symtabh
|| !strtabh
) {
2578 munmap (data
, file_size
);
2581 strtab
= (const char*)data
+ strtabh
->sh_offset
;
2582 num_symbols
= symtabh
->sh_size
/ symtabh
->sh_entsize
;
2583 symbols
= (void*)((char*)data
+ symtabh
->sh_offset
);
2584 dump_elf_symbols (symbols
, num_symbols
, strtab
, load_addr
);
2585 munmap (data
, file_size
);
2590 /* ELF code crashes on some systems. */
2591 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2594 elf_dl_callback (struct dl_phdr_info
*info
, size_t size
, void *data
)
2596 MonoProfiler
*prof
= data
;
2598 const char *filename
;
2600 char *a
= (void*)info
->dlpi_addr
;
2602 ElfW(Dyn
) *dyn
= NULL
;
2603 ElfW(Sym
) *symtab
= NULL
;
2604 ElfW(Word
) *hash_table
= NULL
;
2605 ElfW(Ehdr
) *header
= NULL
;
2606 const char* strtab
= NULL
;
2607 for (obj
= prof
->binary_objects
; obj
; obj
= obj
->next
) {
2611 filename
= info
->dlpi_name
;
2614 if (!info
->dlpi_addr
&& !filename
[0]) {
2615 int l
= readlink ("/proc/self/exe", buf
, sizeof (buf
) - 1);
2621 obj
= g_calloc (sizeof (BinaryObject
), 1);
2622 obj
->addr
= (void*)info
->dlpi_addr
;
2623 obj
->name
= pstrdup (filename
);
2624 obj
->next
= prof
->binary_objects
;
2625 prof
->binary_objects
= obj
;
2626 //printf ("loaded file: %s at %p, segments: %d\n", filename, (void*)info->dlpi_addr, info->dlpi_phnum);
2628 for (i
= 0; i
< info
->dlpi_phnum
; ++i
) {
2629 //printf ("segment type %d file offset: %d, size: %d\n", info->dlpi_phdr[i].p_type, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2630 if (info
->dlpi_phdr
[i
].p_type
== PT_LOAD
&& !header
) {
2631 header
= (ElfW(Ehdr
)*)(info
->dlpi_addr
+ info
->dlpi_phdr
[i
].p_vaddr
);
2632 if (header
->e_ident
[EI_MAG0
] != ELFMAG0
||
2633 header
->e_ident
[EI_MAG1
] != ELFMAG1
||
2634 header
->e_ident
[EI_MAG2
] != ELFMAG2
||
2635 header
->e_ident
[EI_MAG3
] != ELFMAG3
) {
2638 dump_ubin (prof
, filename
, info
->dlpi_addr
+ info
->dlpi_phdr
[i
].p_vaddr
, info
->dlpi_phdr
[i
].p_offset
, info
->dlpi_phdr
[i
].p_memsz
);
2639 } else if (info
->dlpi_phdr
[i
].p_type
== PT_DYNAMIC
) {
2640 dyn
= (ElfW(Dyn
) *)(info
->dlpi_addr
+ info
->dlpi_phdr
[i
].p_vaddr
);
2643 if (read_elf_symbols (prof
, filename
, (void*)info
->dlpi_addr
))
2645 if (!info
->dlpi_name
|| !info
->dlpi_name
[0])
2649 for (i
= 0; dyn
[i
].d_tag
!= DT_NULL
; ++i
) {
2650 if (dyn
[i
].d_tag
== DT_SYMTAB
) {
2651 if (symtab
&& do_debug
)
2652 printf ("multiple symtabs: %d\n", i
);
2653 symtab
= (ElfW(Sym
) *)(a
+ dyn
[i
].d_un
.d_ptr
);
2654 } else if (dyn
[i
].d_tag
== DT_HASH
) {
2655 hash_table
= (ElfW(Word
) *)(a
+ dyn
[i
].d_un
.d_ptr
);
2656 } else if (dyn
[i
].d_tag
== DT_STRTAB
) {
2657 strtab
= (const char*)(a
+ dyn
[i
].d_un
.d_ptr
);
2662 num_sym
= hash_table
[1];
2663 dump_elf_symbols (prof
, symtab
, num_sym
, strtab
, (void*)info
->dlpi_addr
);
2668 load_binaries (MonoProfiler
*prof
)
2670 dl_iterate_phdr (elf_dl_callback
, prof
);
2675 load_binaries (MonoProfiler
*prof
)
2682 symbol_for (uintptr_t code
)
2685 void *ip
= (void*)code
;
2687 if (dladdr (ip
, &di
)) {
2689 return di
.dli_sname
;
2692 names = backtrace_symbols (&ip, 1);
2694 const char* p = names [0];
2705 dump_unmanaged_coderefs (MonoProfiler
*prof
)
2708 const char* last_symbol
;
2709 uintptr_t addr
, page_end
;
2711 if (load_binaries (prof
))
2713 for (i
= 0; i
< size_code_pages
; ++i
) {
2715 if (!code_pages
[i
] || code_pages
[i
] & 1)
2718 addr
= CPAGE_ADDR (code_pages
[i
]);
2719 page_end
= addr
+ CPAGE_SIZE
;
2720 code_pages
[i
] |= 1;
2721 /* we dump the symbols for the whole page */
2722 for (; addr
< page_end
; addr
+= 16) {
2723 sym
= symbol_for (addr
);
2724 if (sym
&& sym
== last_symbol
)
2729 dump_usym (prof
, sym
, addr
, 0); /* let's not guess the size */
2730 //printf ("found symbol at %p: %s\n", (void*)addr, sym);
2736 mono_cpu_count (void)
2738 #ifdef PLATFORM_ANDROID
2739 /* Android tries really hard to save power by powering off CPUs on SMP phones which
2740 * means the normal way to query cpu count returns a wrong value with userspace API.
2741 * Instead we use /sys entries to query the actual hardware CPU count.
2744 char buffer
[8] = {'\0'};
2745 int present
= open ("/sys/devices/system/cpu/present", O_RDONLY
);
2746 /* Format of the /sys entry is a cpulist of indexes which in the case
2747 * of present is always of the form "0-(n-1)" when there is more than
2748 * 1 core, n being the number of CPU cores in the system. Otherwise
2749 * the value is simply 0
2751 if (present
!= -1 && read (present
, (char*)buffer
, sizeof (buffer
)) > 3)
2752 count
= strtol (((char*)buffer
) + 2, NULL
, 10);
2759 #if defined(HOST_ARM) || defined (HOST_ARM64)
2761 /* ARM platforms tries really hard to save power by powering off CPUs on SMP phones which
2762 * means the normal way to query cpu count returns a wrong value with userspace API. */
2764 #ifdef _SC_NPROCESSORS_CONF
2766 int count
= sysconf (_SC_NPROCESSORS_CONF
);
2774 #ifdef HAVE_SCHED_GETAFFINITY
2777 if (sched_getaffinity (getpid (), sizeof (set
), &set
) == 0)
2778 return CPU_COUNT (&set
);
2781 #ifdef _SC_NPROCESSORS_ONLN
2783 int count
= sysconf (_SC_NPROCESSORS_ONLN
);
2789 #endif /* defined(HOST_ARM) || defined (HOST_ARM64) */
2795 size_t len
= sizeof (int);
2798 if (sysctl (mib
, 2, &count
, &len
, NULL
, 0) == 0)
2805 GetSystemInfo (&info
);
2806 return info
.dwNumberOfProcessors
;
2810 static gboolean warned
;
2813 g_warning ("Don't know how to determine CPU count on this platform; assuming 1");
2820 typedef struct MonoCounterAgent
{
2821 MonoCounter
*counter
;
2822 // MonoCounterAgent specific data :
2827 struct MonoCounterAgent
*next
;
2830 static MonoCounterAgent
* counters
;
2831 static int counters_index
= 1;
2832 static mono_mutex_t counters_mutex
;
2835 counters_add_agent (MonoCounter
*counter
)
2837 if (InterlockedRead (&in_shutdown
))
2840 MonoCounterAgent
*agent
, *item
;
2842 mono_os_mutex_lock (&counters_mutex
);
2844 for (agent
= counters
; agent
; agent
= agent
->next
) {
2845 if (agent
->counter
== counter
) {
2846 agent
->value_size
= 0;
2848 g_free (agent
->value
);
2849 agent
->value
= NULL
;
2855 agent
= (MonoCounterAgent
*) g_malloc (sizeof (MonoCounterAgent
));
2856 agent
->counter
= counter
;
2857 agent
->value
= NULL
;
2858 agent
->value_size
= 0;
2859 agent
->index
= counters_index
++;
2873 mono_os_mutex_unlock (&counters_mutex
);
2877 counters_init_foreach_callback (MonoCounter
*counter
, gpointer data
)
2879 counters_add_agent (counter
);
2884 counters_init (MonoProfiler
*profiler
)
2886 mono_os_mutex_init (&counters_mutex
);
2888 mono_counters_on_register (&counters_add_agent
);
2889 mono_counters_foreach (counters_init_foreach_callback
, NULL
);
2893 counters_emit (MonoProfiler
*profiler
)
2895 MonoCounterAgent
*agent
;
2898 EVENT_SIZE
/* event */ +
2899 LEB128_SIZE
/* len */
2902 mono_os_mutex_lock (&counters_mutex
);
2904 for (agent
= counters
; agent
; agent
= agent
->next
) {
2909 LEB128_SIZE
/* section */ +
2910 strlen (mono_counter_get_name (agent
->counter
)) + 1 /* name */ +
2911 BYTE_SIZE
/* type */ +
2912 BYTE_SIZE
/* unit */ +
2913 BYTE_SIZE
/* variance */ +
2914 LEB128_SIZE
/* index */
2923 ENTER_LOG (&counter_descriptors_ctr
, logbuffer
, size
);
2925 emit_event (logbuffer
, TYPE_SAMPLE_COUNTERS_DESC
| TYPE_SAMPLE
);
2926 emit_value (logbuffer
, len
);
2928 for (agent
= counters
; agent
; agent
= agent
->next
) {
2934 name
= mono_counter_get_name (agent
->counter
);
2935 emit_value (logbuffer
, mono_counter_get_section (agent
->counter
));
2936 emit_string (logbuffer
, name
, strlen (name
) + 1);
2937 emit_byte (logbuffer
, mono_counter_get_type (agent
->counter
));
2938 emit_byte (logbuffer
, mono_counter_get_unit (agent
->counter
));
2939 emit_byte (logbuffer
, mono_counter_get_variance (agent
->counter
));
2940 emit_value (logbuffer
, agent
->index
);
2945 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
2948 mono_os_mutex_unlock (&counters_mutex
);
2952 counters_sample (MonoProfiler
*profiler
, uint64_t timestamp
)
2954 MonoCounterAgent
*agent
;
2955 MonoCounter
*counter
;
2961 counters_emit (profiler
);
2964 buffer
= g_calloc (1, buffer_size
);
2966 mono_os_mutex_lock (&counters_mutex
);
2969 EVENT_SIZE
/* event */
2972 for (agent
= counters
; agent
; agent
= agent
->next
) {
2974 LEB128_SIZE
/* index */ +
2975 BYTE_SIZE
/* type */ +
2976 mono_counter_get_size (agent
->counter
) /* value */
2981 LEB128_SIZE
/* stop marker */
2984 ENTER_LOG (&counter_samples_ctr
, logbuffer
, size
);
2986 emit_event_time (logbuffer
, TYPE_SAMPLE_COUNTERS
| TYPE_SAMPLE
, timestamp
);
2988 for (agent
= counters
; agent
; agent
= agent
->next
) {
2991 counter
= agent
->counter
;
2993 size
= mono_counter_get_size (counter
);
2995 if (size
> buffer_size
) {
2997 buffer
= g_realloc (buffer
, buffer_size
);
3000 memset (buffer
, 0, buffer_size
);
3002 g_assert (mono_counters_sample (counter
, buffer
, size
));
3004 type
= mono_counter_get_type (counter
);
3006 if (!agent
->value
) {
3007 agent
->value
= g_calloc (1, size
);
3008 agent
->value_size
= size
;
3010 if (type
== MONO_COUNTER_STRING
) {
3011 if (strcmp (agent
->value
, buffer
) == 0)
3014 if (agent
->value_size
== size
&& memcmp (agent
->value
, buffer
, size
) == 0)
3019 emit_uvalue (logbuffer
, agent
->index
);
3020 emit_byte (logbuffer
, type
);
3022 case MONO_COUNTER_INT
:
3023 #if SIZEOF_VOID_P == 4
3024 case MONO_COUNTER_WORD
:
3026 emit_svalue (logbuffer
, *(int*)buffer
- *(int*)agent
->value
);
3028 case MONO_COUNTER_UINT
:
3029 emit_uvalue (logbuffer
, *(guint
*)buffer
- *(guint
*)agent
->value
);
3031 case MONO_COUNTER_TIME_INTERVAL
:
3032 case MONO_COUNTER_LONG
:
3033 #if SIZEOF_VOID_P == 8
3034 case MONO_COUNTER_WORD
:
3036 emit_svalue (logbuffer
, *(gint64
*)buffer
- *(gint64
*)agent
->value
);
3038 case MONO_COUNTER_ULONG
:
3039 emit_uvalue (logbuffer
, *(guint64
*)buffer
- *(guint64
*)agent
->value
);
3041 case MONO_COUNTER_DOUBLE
:
3042 emit_double (logbuffer
, *(double*)buffer
);
3044 case MONO_COUNTER_STRING
:
3046 emit_byte (logbuffer
, 0);
3048 emit_byte (logbuffer
, 1);
3049 emit_string (logbuffer
, (char*)buffer
, size
);
3053 g_assert_not_reached ();
3056 if (type
== MONO_COUNTER_STRING
&& size
> agent
->value_size
) {
3057 agent
->value
= g_realloc (agent
->value
, size
);
3058 agent
->value_size
= size
;
3062 memcpy (agent
->value
, buffer
, size
);
3066 emit_value (logbuffer
, 0);
3068 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
3070 mono_os_mutex_unlock (&counters_mutex
);
3073 typedef struct _PerfCounterAgent PerfCounterAgent
;
3074 struct _PerfCounterAgent
{
3075 PerfCounterAgent
*next
;
3077 char *category_name
;
3086 static PerfCounterAgent
*perfcounters
= NULL
;
3089 perfcounters_emit (MonoProfiler
*profiler
)
3091 PerfCounterAgent
*pcagent
;
3094 EVENT_SIZE
/* event */ +
3095 LEB128_SIZE
/* len */
3098 for (pcagent
= perfcounters
; pcagent
; pcagent
= pcagent
->next
) {
3099 if (pcagent
->emitted
)
3103 LEB128_SIZE
/* section */ +
3104 strlen (pcagent
->category_name
) + 1 /* category name */ +
3105 strlen (pcagent
->name
) + 1 /* name */ +
3106 BYTE_SIZE
/* type */ +
3107 BYTE_SIZE
/* unit */ +
3108 BYTE_SIZE
/* variance */ +
3109 LEB128_SIZE
/* index */
3118 ENTER_LOG (&perfcounter_descriptors_ctr
, logbuffer
, size
);
3120 emit_event (logbuffer
, TYPE_SAMPLE_COUNTERS_DESC
| TYPE_SAMPLE
);
3121 emit_value (logbuffer
, len
);
3123 for (pcagent
= perfcounters
; pcagent
; pcagent
= pcagent
->next
) {
3124 if (pcagent
->emitted
)
3127 emit_value (logbuffer
, MONO_COUNTER_PERFCOUNTERS
);
3128 emit_string (logbuffer
, pcagent
->category_name
, strlen (pcagent
->category_name
) + 1);
3129 emit_string (logbuffer
, pcagent
->name
, strlen (pcagent
->name
) + 1);
3130 emit_byte (logbuffer
, MONO_COUNTER_LONG
);
3131 emit_byte (logbuffer
, MONO_COUNTER_RAW
);
3132 emit_byte (logbuffer
, MONO_COUNTER_VARIABLE
);
3133 emit_value (logbuffer
, pcagent
->index
);
3135 pcagent
->emitted
= 1;
3138 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
3142 perfcounters_foreach (char *category_name
, char *name
, unsigned char type
, gint64 value
, gpointer user_data
)
3144 PerfCounterAgent
*pcagent
;
3146 for (pcagent
= perfcounters
; pcagent
; pcagent
= pcagent
->next
) {
3147 if (strcmp (pcagent
->category_name
, category_name
) != 0 || strcmp (pcagent
->name
, name
) != 0)
3149 if (pcagent
->value
== value
)
3152 pcagent
->value
= value
;
3153 pcagent
->updated
= 1;
3154 pcagent
->deleted
= 0;
3158 pcagent
= g_new0 (PerfCounterAgent
, 1);
3159 pcagent
->next
= perfcounters
;
3160 pcagent
->index
= counters_index
++;
3161 pcagent
->category_name
= g_strdup (category_name
);
3162 pcagent
->name
= g_strdup (name
);
3163 pcagent
->type
= (int) type
;
3164 pcagent
->value
= value
;
3165 pcagent
->emitted
= 0;
3166 pcagent
->updated
= 1;
3167 pcagent
->deleted
= 0;
3169 perfcounters
= pcagent
;
3175 perfcounters_sample (MonoProfiler
*profiler
, uint64_t timestamp
)
3177 PerfCounterAgent
*pcagent
;
3181 mono_os_mutex_lock (&counters_mutex
);
3183 /* mark all perfcounters as deleted, foreach will unmark them as necessary */
3184 for (pcagent
= perfcounters
; pcagent
; pcagent
= pcagent
->next
)
3185 pcagent
->deleted
= 1;
3187 mono_perfcounter_foreach (perfcounters_foreach
, perfcounters
);
3189 perfcounters_emit (profiler
);
3192 EVENT_SIZE
/* event */
3195 for (pcagent
= perfcounters
; pcagent
; pcagent
= pcagent
->next
) {
3196 if (pcagent
->deleted
|| !pcagent
->updated
)
3200 LEB128_SIZE
/* index */ +
3201 BYTE_SIZE
/* type */ +
3202 LEB128_SIZE
/* value */
3212 LEB128_SIZE
/* stop marker */
3215 ENTER_LOG (&perfcounter_samples_ctr
, logbuffer
, size
);
3217 emit_event_time (logbuffer
, TYPE_SAMPLE_COUNTERS
| TYPE_SAMPLE
, timestamp
);
3219 for (pcagent
= perfcounters
; pcagent
; pcagent
= pcagent
->next
) {
3220 if (pcagent
->deleted
|| !pcagent
->updated
)
3222 emit_uvalue (logbuffer
, pcagent
->index
);
3223 emit_byte (logbuffer
, MONO_COUNTER_LONG
);
3224 emit_svalue (logbuffer
, pcagent
->value
);
3226 pcagent
->updated
= 0;
3229 emit_value (logbuffer
, 0);
3231 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
3234 mono_os_mutex_unlock (&counters_mutex
);
3238 counters_and_perfcounters_sample (MonoProfiler
*prof
)
3240 uint64_t now
= current_time ();
3242 counters_sample (prof
, now
);
3243 perfcounters_sample (prof
, now
);
3246 #define COVERAGE_DEBUG(x) if (debug_coverage) {x}
3247 static mono_mutex_t coverage_mutex
;
3248 static MonoConcurrentHashTable
*coverage_methods
= NULL
;
3249 static MonoConcurrentHashTable
*coverage_assemblies
= NULL
;
3250 static MonoConcurrentHashTable
*coverage_classes
= NULL
;
3252 static MonoConcurrentHashTable
*filtered_classes
= NULL
;
3253 static MonoConcurrentHashTable
*entered_methods
= NULL
;
3254 static MonoConcurrentHashTable
*image_to_methods
= NULL
;
3255 static MonoConcurrentHashTable
*suppressed_assemblies
= NULL
;
3256 static gboolean coverage_initialized
= FALSE
;
3258 static GPtrArray
*coverage_data
= NULL
;
3259 static int previous_offset
= 0;
3262 MonoLockFreeQueueNode node
;
3275 free_coverage_entry (gpointer data
, gpointer userdata
)
3277 CoverageEntry
*entry
= (CoverageEntry
*)data
;
3278 g_free (entry
->filename
);
3283 obtain_coverage_for_method (MonoProfiler
*prof
, const MonoProfileCoverageEntry
*entry
)
3285 int offset
= entry
->iloffset
- previous_offset
;
3286 CoverageEntry
*e
= g_new (CoverageEntry
, 1);
3288 previous_offset
= entry
->iloffset
;
3291 e
->counter
= entry
->counter
;
3292 e
->filename
= g_strdup(entry
->filename
? entry
->filename
: "");
3293 e
->line
= entry
->line
;
3294 e
->column
= entry
->col
;
3296 g_ptr_array_add (coverage_data
, e
);
/*
 * Rewrite a reflection-style generic type name like "Dict<K,V>" into the
 * IL arity form "Dict`2", stripping the type-argument list. Returns a
 * newly allocated string the caller must free; an empty string copy for
 * NULL/empty input; NULL if allocation fails.
 *
 * NOTE(review): parts of the switch skeleton were lost in extraction and
 * reconstructed — confirm against the original.
 */
static char *
parse_generic_type_names(char *name)
{
	char *new_name, *ret;
	int within_generic_declaration = 0, generic_members = 1;

	if (name == NULL || *name == '\0')
		return g_strdup ("");

	/* 4x is a generous worst-case expansion bound for the rewrite. */
	if (!(ret = new_name = (char *) g_calloc (strlen (name) * 4 + 1, sizeof (char))))
		return NULL;

	do {
		switch (*name) {
		case '<':
			within_generic_declaration = 1;
			break;
		case '>':
			within_generic_declaration = 0;

			if (*(name - 1) != '<') {
				/* Ordinary generic: emit `N, N = number of type args. */
				*new_name++ = '`';
				*new_name++ = '0' + generic_members;
			} else {
				/* Compiler-generated "<>" marker: keep it literally.
				 * FIX: was memcpy (new_name, "<>", 8), which reads 5
				 * bytes past the end of the 3-byte literal. */
				memcpy (new_name, "<>", 2);
				new_name += 2;
			}

			generic_members = 0;
			break;
		case ',':
			generic_members++;
			break;
		default:
			if (!within_generic_declaration)
				*new_name++ = *name;
			break;
		}
	} while (*name++);

	return ret;
}
3346 static int method_id
;
3348 build_method_buffer (gpointer key
, gpointer value
, gpointer userdata
)
3350 MonoMethod
*method
= (MonoMethod
*)value
;
3351 MonoProfiler
*prof
= (MonoProfiler
*)userdata
;
3355 const char *image_name
, *method_name
, *sig
, *first_filename
;
3358 previous_offset
= 0;
3359 coverage_data
= g_ptr_array_new ();
3361 mono_profiler_coverage_get (prof
, method
, obtain_coverage_for_method
);
3363 klass
= mono_method_get_class (method
);
3364 image
= mono_class_get_image (klass
);
3365 image_name
= mono_image_get_name (image
);
3367 sig
= mono_signature_get_desc (mono_method_signature (method
), TRUE
);
3368 class_name
= parse_generic_type_names (mono_type_get_name (mono_class_get_type (klass
)));
3369 method_name
= mono_method_get_name (method
);
3371 if (coverage_data
->len
!= 0) {
3372 CoverageEntry
*entry
= (CoverageEntry
*)coverage_data
->pdata
[0];
3373 first_filename
= entry
->filename
? entry
->filename
: "";
3375 first_filename
= "";
3377 image_name
= image_name
? image_name
: "";
3378 sig
= sig
? sig
: "";
3379 method_name
= method_name
? method_name
: "";
3381 ENTER_LOG (&coverage_methods_ctr
, logbuffer
,
3382 EVENT_SIZE
/* event */ +
3383 strlen (image_name
) + 1 /* image name */ +
3384 strlen (class_name
) + 1 /* class name */ +
3385 strlen (method_name
) + 1 /* method name */ +
3386 strlen (sig
) + 1 /* signature */ +
3387 strlen (first_filename
) + 1 /* first file name */ +
3388 LEB128_SIZE
/* token */ +
3389 LEB128_SIZE
/* method id */ +
3390 LEB128_SIZE
/* entries */
3393 emit_event (logbuffer
, TYPE_COVERAGE_METHOD
| TYPE_COVERAGE
);
3394 emit_string (logbuffer
, image_name
, strlen (image_name
) + 1);
3395 emit_string (logbuffer
, class_name
, strlen (class_name
) + 1);
3396 emit_string (logbuffer
, method_name
, strlen (method_name
) + 1);
3397 emit_string (logbuffer
, sig
, strlen (sig
) + 1);
3398 emit_string (logbuffer
, first_filename
, strlen (first_filename
) + 1);
3400 emit_uvalue (logbuffer
, mono_method_get_token (method
));
3401 emit_uvalue (logbuffer
, method_id
);
3402 emit_value (logbuffer
, coverage_data
->len
);
3404 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
3406 for (i
= 0; i
< coverage_data
->len
; i
++) {
3407 CoverageEntry
*entry
= (CoverageEntry
*)coverage_data
->pdata
[i
];
3409 ENTER_LOG (&coverage_statements_ctr
, logbuffer
,
3410 EVENT_SIZE
/* event */ +
3411 LEB128_SIZE
/* method id */ +
3412 LEB128_SIZE
/* offset */ +
3413 LEB128_SIZE
/* counter */ +
3414 LEB128_SIZE
/* line */ +
3415 LEB128_SIZE
/* column */
3418 emit_event (logbuffer
, TYPE_COVERAGE_STATEMENT
| TYPE_COVERAGE
);
3419 emit_uvalue (logbuffer
, method_id
);
3420 emit_uvalue (logbuffer
, entry
->offset
);
3421 emit_uvalue (logbuffer
, entry
->counter
);
3422 emit_uvalue (logbuffer
, entry
->line
);
3423 emit_uvalue (logbuffer
, entry
->column
);
3425 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
3430 g_free (class_name
);
3432 g_ptr_array_foreach (coverage_data
, free_coverage_entry
, NULL
);
3433 g_ptr_array_free (coverage_data
, TRUE
);
3434 coverage_data
= NULL
;
3437 /* This empties the queue */
3439 count_queue (MonoLockFreeQueue
*queue
)
3441 MonoLockFreeQueueNode
*node
;
3444 while ((node
= mono_lock_free_queue_dequeue (queue
))) {
3446 mono_thread_hazardous_try_free (node
, g_free
);
3453 build_class_buffer (gpointer key
, gpointer value
, gpointer userdata
)
3455 MonoClass
*klass
= (MonoClass
*)key
;
3456 MonoLockFreeQueue
*class_methods
= (MonoLockFreeQueue
*)value
;
3459 const char *assembly_name
;
3460 int number_of_methods
, partially_covered
;
3461 guint fully_covered
;
3463 image
= mono_class_get_image (klass
);
3464 assembly_name
= mono_image_get_name (image
);
3465 class_name
= mono_type_get_name (mono_class_get_type (klass
));
3467 assembly_name
= assembly_name
? assembly_name
: "";
3468 number_of_methods
= mono_class_num_methods (klass
);
3469 fully_covered
= count_queue (class_methods
);
3470 /* We don't handle partial covered yet */
3471 partially_covered
= 0;
3473 ENTER_LOG (&coverage_classes_ctr
, logbuffer
,
3474 EVENT_SIZE
/* event */ +
3475 strlen (assembly_name
) + 1 /* assembly name */ +
3476 strlen (class_name
) + 1 /* class name */ +
3477 LEB128_SIZE
/* no. methods */ +
3478 LEB128_SIZE
/* fully covered */ +
3479 LEB128_SIZE
/* partially covered */
3482 emit_event (logbuffer
, TYPE_COVERAGE_CLASS
| TYPE_COVERAGE
);
3483 emit_string (logbuffer
, assembly_name
, strlen (assembly_name
) + 1);
3484 emit_string (logbuffer
, class_name
, strlen (class_name
) + 1);
3485 emit_uvalue (logbuffer
, number_of_methods
);
3486 emit_uvalue (logbuffer
, fully_covered
);
3487 emit_uvalue (logbuffer
, partially_covered
);
3489 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
3491 g_free (class_name
);
3495 get_coverage_for_image (MonoImage
*image
, int *number_of_methods
, guint
*fully_covered
, int *partially_covered
)
3497 MonoLockFreeQueue
*image_methods
= (MonoLockFreeQueue
*)mono_conc_hashtable_lookup (image_to_methods
, image
);
3499 *number_of_methods
= mono_image_get_table_rows (image
, MONO_TABLE_METHOD
);
3501 *fully_covered
= count_queue (image_methods
);
3505 // FIXME: We don't handle partially covered yet.
3506 *partially_covered
= 0;
3510 build_assembly_buffer (gpointer key
, gpointer value
, gpointer userdata
)
3512 MonoAssembly
*assembly
= (MonoAssembly
*)value
;
3513 MonoImage
*image
= mono_assembly_get_image (assembly
);
3514 const char *name
, *guid
, *filename
;
3515 int number_of_methods
= 0, partially_covered
= 0;
3516 guint fully_covered
= 0;
3518 name
= mono_image_get_name (image
);
3519 guid
= mono_image_get_guid (image
);
3520 filename
= mono_image_get_filename (image
);
3522 name
= name
? name
: "";
3523 guid
= guid
? guid
: "";
3524 filename
= filename
? filename
: "";
3526 get_coverage_for_image (image
, &number_of_methods
, &fully_covered
, &partially_covered
);
3528 ENTER_LOG (&coverage_assemblies_ctr
, logbuffer
,
3529 EVENT_SIZE
/* event */ +
3530 strlen (name
) + 1 /* name */ +
3531 strlen (guid
) + 1 /* guid */ +
3532 strlen (filename
) + 1 /* file name */ +
3533 LEB128_SIZE
/* no. methods */ +
3534 LEB128_SIZE
/* fully covered */ +
3535 LEB128_SIZE
/* partially covered */
3538 emit_event (logbuffer
, TYPE_COVERAGE_ASSEMBLY
| TYPE_COVERAGE
);
3539 emit_string (logbuffer
, name
, strlen (name
) + 1);
3540 emit_string (logbuffer
, guid
, strlen (guid
) + 1);
3541 emit_string (logbuffer
, filename
, strlen (filename
) + 1);
3542 emit_uvalue (logbuffer
, number_of_methods
);
3543 emit_uvalue (logbuffer
, fully_covered
);
3544 emit_uvalue (logbuffer
, partially_covered
);
3546 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
3550 dump_coverage (MonoProfiler
*prof
)
3552 if (!coverage_initialized
)
3555 COVERAGE_DEBUG(fprintf (stderr
, "Coverage: Started dump\n");)
3558 mono_os_mutex_lock (&coverage_mutex
);
3559 mono_conc_hashtable_foreach (coverage_assemblies
, build_assembly_buffer
, NULL
);
3560 mono_conc_hashtable_foreach (coverage_classes
, build_class_buffer
, NULL
);
3561 mono_conc_hashtable_foreach (coverage_methods
, build_method_buffer
, prof
);
3562 mono_os_mutex_unlock (&coverage_mutex
);
3564 COVERAGE_DEBUG(fprintf (stderr
, "Coverage: Finished dump\n");)
3568 process_method_enter_coverage (MonoProfiler
*prof
, MonoMethod
*method
)
3573 if (!coverage_initialized
)
3576 klass
= mono_method_get_class (method
);
3577 image
= mono_class_get_image (klass
);
3579 if (mono_conc_hashtable_lookup (suppressed_assemblies
, (gpointer
) mono_image_get_name (image
)))
3582 mono_os_mutex_lock (&coverage_mutex
);
3583 mono_conc_hashtable_insert (entered_methods
, method
, method
);
3584 mono_os_mutex_unlock (&coverage_mutex
);
3587 static MonoLockFreeQueueNode
*
3588 create_method_node (MonoMethod
*method
)
3590 MethodNode
*node
= (MethodNode
*) g_malloc (sizeof (MethodNode
));
3591 mono_lock_free_queue_node_init ((MonoLockFreeQueueNode
*) node
, FALSE
);
3592 node
->method
= method
;
3594 return (MonoLockFreeQueueNode
*) node
;
3598 coverage_filter (MonoProfiler
*prof
, MonoMethod
*method
)
3603 MonoAssembly
*assembly
;
3604 MonoMethodHeader
*header
;
3605 guint32 iflags
, flags
, code_size
;
3606 char *fqn
, *classname
;
3607 gboolean has_positive
, found
;
3608 MonoLockFreeQueue
*image_methods
, *class_methods
;
3609 MonoLockFreeQueueNode
*node
;
3611 g_assert (coverage_initialized
&& "Why are we being asked for coverage filter info when we're not doing coverage?");
3613 COVERAGE_DEBUG(fprintf (stderr
, "Coverage filter for %s\n", mono_method_get_name (method
));)
3615 flags
= mono_method_get_flags (method
, &iflags
);
3616 if ((iflags
& 0x1000 /*METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL*/) ||
3617 (flags
& 0x2000 /*METHOD_ATTRIBUTE_PINVOKE_IMPL*/)) {
3618 COVERAGE_DEBUG(fprintf (stderr
, " Internal call or pinvoke - ignoring\n");)
3622 // Don't need to do anything else if we're already tracking this method
3623 if (mono_conc_hashtable_lookup (coverage_methods
, method
)) {
3624 COVERAGE_DEBUG(fprintf (stderr
, " Already tracking\n");)
3628 klass
= mono_method_get_class (method
);
3629 image
= mono_class_get_image (klass
);
3631 // Don't handle coverage for the core assemblies
3632 if (mono_conc_hashtable_lookup (suppressed_assemblies
, (gpointer
) mono_image_get_name (image
)) != NULL
)
3635 if (prof
->coverage_filters
) {
3636 /* Check already filtered classes first */
3637 if (mono_conc_hashtable_lookup (filtered_classes
, klass
)) {
3638 COVERAGE_DEBUG(fprintf (stderr
, " Already filtered\n");)
3642 classname
= mono_type_get_name (mono_class_get_type (klass
));
3644 fqn
= g_strdup_printf ("[%s]%s", mono_image_get_name (image
), classname
);
3646 COVERAGE_DEBUG(fprintf (stderr
, " Looking for %s in filter\n", fqn
);)
3647 // Check positive filters first
3648 has_positive
= FALSE
;
3650 for (guint i
= 0; i
< prof
->coverage_filters
->len
; ++i
) {
3651 char *filter
= (char *)g_ptr_array_index (prof
->coverage_filters
, i
);
3653 if (filter
[0] == '+') {
3654 filter
= &filter
[1];
3656 COVERAGE_DEBUG(fprintf (stderr
, " Checking against +%s ...", filter
);)
3658 if (strstr (fqn
, filter
) != NULL
) {
3659 COVERAGE_DEBUG(fprintf (stderr
, "matched\n");)
3662 COVERAGE_DEBUG(fprintf (stderr
, "no match\n");)
3664 has_positive
= TRUE
;
3668 if (has_positive
&& !found
) {
3669 COVERAGE_DEBUG(fprintf (stderr
, " Positive match was not found\n");)
3671 mono_os_mutex_lock (&coverage_mutex
);
3672 mono_conc_hashtable_insert (filtered_classes
, klass
, klass
);
3673 mono_os_mutex_unlock (&coverage_mutex
);
3680 for (guint i
= 0; i
< prof
->coverage_filters
->len
; ++i
) {
3681 // FIXME: Is substring search sufficient?
3682 char *filter
= (char *)g_ptr_array_index (prof
->coverage_filters
, i
);
3683 if (filter
[0] == '+')
3687 filter
= &filter
[1];
3688 COVERAGE_DEBUG(fprintf (stderr
, " Checking against -%s ...", filter
);)
3690 if (strstr (fqn
, filter
) != NULL
) {
3691 COVERAGE_DEBUG(fprintf (stderr
, "matched\n");)
3693 mono_os_mutex_lock (&coverage_mutex
);
3694 mono_conc_hashtable_insert (filtered_classes
, klass
, klass
);
3695 mono_os_mutex_unlock (&coverage_mutex
);
3701 COVERAGE_DEBUG(fprintf (stderr
, "no match\n");)
3709 COVERAGE_DEBUG(fprintf (stderr
, " Handling coverage for %s\n", mono_method_get_name (method
));)
3710 header
= mono_method_get_header_checked (method
, &error
);
3711 mono_error_cleanup (&error
);
3713 mono_method_header_get_code (header
, &code_size
, NULL
);
3715 assembly
= mono_image_get_assembly (image
);
3717 // Need to keep the assemblies around for as long as they are kept in the hashtable
3718 // Nunit, for example, has a habit of unloading them before the coverage statistics are
3719 // generated causing a crash. See https://bugzilla.xamarin.com/show_bug.cgi?id=39325
3720 mono_assembly_addref (assembly
);
3722 mono_os_mutex_lock (&coverage_mutex
);
3723 mono_conc_hashtable_insert (coverage_methods
, method
, method
);
3724 mono_conc_hashtable_insert (coverage_assemblies
, assembly
, assembly
);
3725 mono_os_mutex_unlock (&coverage_mutex
);
3727 image_methods
= (MonoLockFreeQueue
*)mono_conc_hashtable_lookup (image_to_methods
, image
);
3729 if (image_methods
== NULL
) {
3730 image_methods
= (MonoLockFreeQueue
*) g_malloc (sizeof (MonoLockFreeQueue
));
3731 mono_lock_free_queue_init (image_methods
);
3732 mono_os_mutex_lock (&coverage_mutex
);
3733 mono_conc_hashtable_insert (image_to_methods
, image
, image_methods
);
3734 mono_os_mutex_unlock (&coverage_mutex
);
3737 node
= create_method_node (method
);
3738 mono_lock_free_queue_enqueue (image_methods
, node
);
3740 class_methods
= (MonoLockFreeQueue
*)mono_conc_hashtable_lookup (coverage_classes
, klass
);
3742 if (class_methods
== NULL
) {
3743 class_methods
= (MonoLockFreeQueue
*) g_malloc (sizeof (MonoLockFreeQueue
));
3744 mono_lock_free_queue_init (class_methods
);
3745 mono_os_mutex_lock (&coverage_mutex
);
3746 mono_conc_hashtable_insert (coverage_classes
, klass
, class_methods
);
3747 mono_os_mutex_unlock (&coverage_mutex
);
3750 node
= create_method_node (method
);
3751 mono_lock_free_queue_enqueue (class_methods
, node
);
3756 #define LINE_BUFFER_SIZE 4096
3757 /* Max file limit of 128KB */
3758 #define MAX_FILE_SIZE 128 * 1024
3760 get_file_content (FILE *stream
)
3765 int res
, offset
= 0;
3767 res
= fseek (stream
, 0, SEEK_END
);
3771 filesize
= ftell (stream
);
3775 res
= fseek (stream
, 0, SEEK_SET
);
3779 if (filesize
> MAX_FILE_SIZE
)
3782 buffer
= (char *) g_malloc ((filesize
+ 1) * sizeof (char));
3783 while ((bytes_read
= fread (buffer
+ offset
, 1, LINE_BUFFER_SIZE
, stream
)) > 0)
3784 offset
+= bytes_read
;
3786 /* NULL terminate our buffer */
3787 buffer
[filesize
] = '\0';
/*
 * Destructive line iterator: terminate the first line of `contents` in
 * place and return it. *next_start receives the start of the following
 * line, or NULL when the input is exhausted (no trailing newline, empty,
 * or NULL input).
 */
static char *
get_next_line (char *contents, char **next_start)
{
	char *p = contents;

	if (p == NULL || *p == '\0') {
		*next_start = NULL;
		return NULL;
	}

	while (*p != '\n' && *p != '\0')
		p++;

	if (*p == '\n') {
		*p = '\0';
		*next_start = p + 1;
	} else
		*next_start = NULL;

	return contents;
}
3814 init_suppressed_assemblies (void)
3820 suppressed_assemblies
= mono_conc_hashtable_new (g_str_hash
, g_str_equal
);
3821 sa_file
= fopen (SUPPRESSION_DIR
"/mono-profiler-log.suppression", "r");
3822 if (sa_file
== NULL
)
3825 /* Don't need to free @content as it is referred to by the lines stored in @suppressed_assemblies */
3826 content
= get_file_content (sa_file
);
3827 if (content
== NULL
) {
3828 g_error ("mono-profiler-log.suppression is greater than 128kb - aborting\n");
3831 while ((line
= get_next_line (content
, &content
))) {
3832 line
= g_strchomp (g_strchug (line
));
3833 /* No locking needed as we're doing initialization */
3834 mono_conc_hashtable_insert (suppressed_assemblies
, line
, line
);
3841 coverage_init (MonoProfiler
*prof
)
3843 g_assert (!coverage_initialized
&& "Why are we initializing coverage twice?");
3845 COVERAGE_DEBUG(fprintf (stderr
, "Coverage initialized\n");)
3847 mono_os_mutex_init (&coverage_mutex
);
3848 coverage_methods
= mono_conc_hashtable_new (NULL
, NULL
);
3849 coverage_assemblies
= mono_conc_hashtable_new (NULL
, NULL
);
3850 coverage_classes
= mono_conc_hashtable_new (NULL
, NULL
);
3851 filtered_classes
= mono_conc_hashtable_new (NULL
, NULL
);
3852 entered_methods
= mono_conc_hashtable_new (NULL
, NULL
);
3853 image_to_methods
= mono_conc_hashtable_new (NULL
, NULL
);
3854 init_suppressed_assemblies ();
3856 coverage_initialized
= TRUE
;
3860 unref_coverage_assemblies (gpointer key
, gpointer value
, gpointer userdata
)
3862 MonoAssembly
*assembly
= (MonoAssembly
*)value
;
3863 mono_assembly_close (assembly
);
3867 free_sample_hit (gpointer p
)
3869 mono_lock_free_free (p
, SAMPLE_BLOCK_SIZE
);
3873 cleanup_reusable_samples (MonoProfiler
*prof
)
3877 while ((sample
= (SampleHit
*) mono_lock_free_queue_dequeue (&prof
->sample_reuse_queue
)))
3878 mono_thread_hazardous_try_free (sample
, free_sample_hit
);
3882 log_shutdown (MonoProfiler
*prof
)
3884 InterlockedWrite (&in_shutdown
, 1);
3887 counters_and_perfcounters_sample (prof
);
3889 dump_coverage (prof
);
3893 if (write (prof
->pipes
[1], &c
, 1) != 1) {
3894 fprintf (stderr
, "Could not write to pipe: %s\n", strerror (errno
));
3898 mono_native_thread_join (prof
->helper_thread
);
3900 mono_os_mutex_destroy (&counters_mutex
);
3902 MonoCounterAgent
*mc_next
;
3904 for (MonoCounterAgent
*cur
= counters
; cur
; cur
= mc_next
) {
3905 mc_next
= cur
->next
;
3909 PerfCounterAgent
*pc_next
;
3911 for (PerfCounterAgent
*cur
= perfcounters
; cur
; cur
= pc_next
) {
3912 pc_next
= cur
->next
;
3917 * Ensure that we empty the LLS completely, even if some nodes are
3918 * not immediately removed upon calling mono_lls_remove (), by
3919 * iterating until the head is NULL.
3921 while (profiler_thread_list
.head
) {
3922 MONO_LLS_FOREACH_SAFE (&profiler_thread_list
, MonoProfilerThread
, thread
) {
3923 g_assert (thread
->attached
&& "Why is a thread in the LLS not attached?");
3925 remove_thread (thread
);
3926 } MONO_LLS_FOREACH_SAFE_END
3930 * Ensure that all threads have been freed, so that we don't miss any
3931 * buffers when we shut down the writer thread below.
3933 mono_thread_hazardous_try_free_all ();
3935 InterlockedWrite (&prof
->run_dumper_thread
, 0);
3936 mono_os_sem_post (&prof
->dumper_queue_sem
);
3937 mono_native_thread_join (prof
->dumper_thread
);
3938 mono_os_sem_destroy (&prof
->dumper_queue_sem
);
3940 InterlockedWrite (&prof
->run_writer_thread
, 0);
3941 mono_os_sem_post (&prof
->writer_queue_sem
);
3942 mono_native_thread_join (prof
->writer_thread
);
3943 mono_os_sem_destroy (&prof
->writer_queue_sem
);
3946 * Free all writer queue entries, and ensure that all sample hits will be
3947 * added to the sample reuse queue.
3949 mono_thread_hazardous_try_free_all ();
3951 cleanup_reusable_samples (prof
);
3954 * Finally, make sure that all sample hits are freed. This should cover all
3955 * hazardous data from the profiler. We can now be sure that the runtime
3956 * won't later invoke free functions in the profiler library after it has
3959 mono_thread_hazardous_try_free_all ();
3961 g_assert (!InterlockedRead (&buffer_rwlock_count
) && "Why is the reader count still non-zero?");
3962 g_assert (!InterlockedReadPointer (&buffer_rwlock_exclusive
) && "Why does someone still hold the exclusive lock?");
3964 #if defined (HAVE_SYS_ZLIB)
3966 gzclose (prof
->gzfile
);
3968 if (prof
->pipe_output
)
3969 pclose (prof
->file
);
3971 fclose (prof
->file
);
3973 mono_conc_hashtable_destroy (prof
->method_table
);
3974 mono_os_mutex_destroy (&prof
->method_table_mutex
);
3976 if (coverage_initialized
) {
3977 mono_os_mutex_lock (&coverage_mutex
);
3978 mono_conc_hashtable_foreach (coverage_assemblies
, unref_coverage_assemblies
, prof
);
3979 mono_os_mutex_unlock (&coverage_mutex
);
3981 mono_conc_hashtable_destroy (coverage_methods
);
3982 mono_conc_hashtable_destroy (coverage_assemblies
);
3983 mono_conc_hashtable_destroy (coverage_classes
);
3984 mono_conc_hashtable_destroy (filtered_classes
);
3986 mono_conc_hashtable_destroy (entered_methods
);
3987 mono_conc_hashtable_destroy (image_to_methods
);
3988 mono_conc_hashtable_destroy (suppressed_assemblies
);
3989 mono_os_mutex_destroy (&coverage_mutex
);
3994 g_free (prof
->args
);
3999 new_filename (const char* filename
)
4001 time_t t
= time (NULL
);
4002 int pid
= process_id ();
4007 int count_dates
= 0;
4011 for (p
= filename
; *p
; p
++) {
4022 if (!count_dates
&& !count_pids
)
4023 return pstrdup (filename
);
4024 snprintf (pid_buf
, sizeof (pid_buf
), "%d", pid
);
4026 snprintf (time_buf
, sizeof (time_buf
), "%d%02d%02d%02d%02d%02d",
4027 1900 + ts
->tm_year
, 1 + ts
->tm_mon
, ts
->tm_mday
, ts
->tm_hour
, ts
->tm_min
, ts
->tm_sec
);
4028 s_date
= strlen (time_buf
);
4029 s_pid
= strlen (pid_buf
);
4030 d
= res
= (char *) g_malloc (strlen (filename
) + s_date
* count_dates
+ s_pid
* count_pids
);
4031 for (p
= filename
; *p
; p
++) {
4038 strcpy (d
, time_buf
);
4041 } else if (*p
== 'p') {
4042 strcpy (d
, pid_buf
);
4045 } else if (*p
== '%') {
/*
 * Adds @fd to @set for a subsequent select (), keeping *max_fd up to date
 * with the highest descriptor seen (select () needs max_fd + 1).
 *
 * NOTE(review): reconstructed from a garbled extraction — the FD_SET call,
 * exit () on out-of-range fd, and the max_fd update were inferred from the
 * signature and the surviving error message; confirm against upstream.
 */
static void
add_to_fd_set (fd_set *set, int fd, int *max_fd)
{
	/*
	 * This should only trigger for the basic FDs (server socket, pipes) at
	 * startup if for some mysterious reason they're too large. In this case,
	 * the profiler really can't function, and we're better off printing an
	 * error and exiting.
	 */
	if (fd >= FD_SETSIZE) {
		fprintf (stderr, "File descriptor is out of bounds for fd_set: %d\n", fd);
		exit (1);
	}

	FD_SET (fd, set);

	if (*max_fd < fd)
		*max_fd = fd;
}
4078 helper_thread (void *arg
)
4080 MonoProfiler
*prof
= (MonoProfiler
*) arg
;
4082 mono_threads_attach_tools_thread ();
4083 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
4085 MonoProfilerThread
*thread
= init_thread (prof
, FALSE
);
4087 GArray
*command_sockets
= g_array_new (FALSE
, FALSE
, sizeof (int));
4095 add_to_fd_set (&rfds
, prof
->server_socket
, &max_fd
);
4096 add_to_fd_set (&rfds
, prof
->pipes
[0], &max_fd
);
4098 for (gint i
= 0; i
< command_sockets
->len
; i
++)
4099 add_to_fd_set (&rfds
, g_array_index (command_sockets
, int, i
), &max_fd
);
4101 struct timeval tv
= { .tv_sec
= 1, .tv_usec
= 0 };
4103 // Sleep for 1sec or until a file descriptor has data.
4104 if (select (max_fd
+ 1, &rfds
, NULL
, NULL
, &tv
) == -1) {
4108 fprintf (stderr
, "Error in mono-profiler-log server: %s", strerror (errno
));
4113 counters_and_perfcounters_sample (prof
);
4115 buffer_lock_excl ();
4117 sync_point (SYNC_POINT_PERIODIC
);
4119 buffer_unlock_excl ();
4121 // Are we shutting down?
4122 if (FD_ISSET (prof
->pipes
[0], &rfds
)) {
4124 read (prof
->pipes
[0], &c
, 1);
4128 for (gint i
= 0; i
< command_sockets
->len
; i
++) {
4129 int fd
= g_array_index (command_sockets
, int, i
);
4131 if (!FD_ISSET (fd
, &rfds
))
4135 int len
= read (fd
, buf
, sizeof (buf
) - 1);
4141 // The other end disconnected.
4142 g_array_remove_index (command_sockets
, i
);
4150 if (!strcmp (buf
, "heapshot\n") && hs_mode_ondemand
) {
4151 // Rely on the finalization callbacks invoking process_requests ().
4152 heapshot_requested
= 1;
4153 mono_gc_finalize_notify ();
4157 if (FD_ISSET (prof
->server_socket
, &rfds
)) {
4158 int fd
= accept (prof
->server_socket
, NULL
, NULL
);
4161 if (fd
>= FD_SETSIZE
)
4164 g_array_append_val (command_sockets
, fd
);
4169 for (gint i
= 0; i
< command_sockets
->len
; i
++)
4170 close (g_array_index (command_sockets
, int, i
));
4172 g_array_free (command_sockets
, TRUE
);
4174 send_log_unsafe (FALSE
);
4175 deinit_thread (thread
);
4177 mono_thread_info_detach ();
4183 start_helper_thread (MonoProfiler
* prof
)
4185 if (pipe (prof
->pipes
) == -1) {
4186 fprintf (stderr
, "Cannot create pipe: %s\n", strerror (errno
));
4190 prof
->server_socket
= socket (PF_INET
, SOCK_STREAM
, 0);
4192 if (prof
->server_socket
== -1) {
4193 fprintf (stderr
, "Cannot create server socket: %s\n", strerror (errno
));
4197 struct sockaddr_in server_address
;
4199 memset (&server_address
, 0, sizeof (server_address
));
4200 server_address
.sin_family
= AF_INET
;
4201 server_address
.sin_addr
.s_addr
= INADDR_ANY
;
4202 server_address
.sin_port
= htons (prof
->command_port
);
4204 if (bind (prof
->server_socket
, (struct sockaddr
*) &server_address
, sizeof (server_address
)) == -1) {
4205 fprintf (stderr
, "Cannot bind server socket on port %d: %s\n", prof
->command_port
, strerror (errno
));
4206 close (prof
->server_socket
);
4210 if (listen (prof
->server_socket
, 1) == -1) {
4211 fprintf (stderr
, "Cannot listen on server socket: %s\n", strerror (errno
));
4212 close (prof
->server_socket
);
4216 socklen_t slen
= sizeof (server_address
);
4218 if (getsockname (prof
->server_socket
, (struct sockaddr
*) &server_address
, &slen
)) {
4219 fprintf (stderr
, "Could not get assigned port: %s\n", strerror (errno
));
4220 close (prof
->server_socket
);
4224 prof
->command_port
= ntohs (server_address
.sin_port
);
4226 if (!mono_native_thread_create (&prof
->helper_thread
, helper_thread
, prof
)) {
4227 fprintf (stderr
, "Could not start helper thread\n");
4228 close (prof
->server_socket
);
4234 free_writer_entry (gpointer p
)
4236 mono_lock_free_free (p
, WRITER_ENTRY_BLOCK_SIZE
);
4240 handle_writer_queue_entry (MonoProfiler
*prof
)
4242 WriterQueueEntry
*entry
;
4244 if ((entry
= (WriterQueueEntry
*) mono_lock_free_queue_dequeue (&prof
->writer_queue
))) {
4245 if (!entry
->methods
)
4248 gboolean wrote_methods
= FALSE
;
4251 * Encode the method events in a temporary log buffer that we
4252 * flush to disk before the main buffer, ensuring that all
4253 * methods have metadata emitted before they're referenced.
4255 * We use a 'proper' thread-local buffer for this as opposed
4256 * to allocating and freeing a buffer by hand because the call
4257 * to mono_method_full_name () below may trigger class load
4258 * events when it retrieves the signature of the method. So a
4259 * thread-local buffer needs to exist when such events occur.
4261 for (guint i
= 0; i
< entry
->methods
->len
; i
++) {
4262 MethodInfo
*info
= (MethodInfo
*) g_ptr_array_index (entry
->methods
, i
);
4264 if (mono_conc_hashtable_lookup (prof
->method_table
, info
->method
))
4265 goto free_info
; // This method already has metadata emitted.
4268 * Other threads use this hash table to get a general
4269 * idea of whether a method has already been emitted to
4270 * the stream. Due to the way we add to this table, it
4271 * can easily happen that multiple threads queue up the
4272 * same methods, but that's OK since eventually all
4273 * methods will be in this table and the thread-local
4274 * method lists will just be empty for the rest of the
4277 mono_os_mutex_lock (&prof
->method_table_mutex
);
4278 mono_conc_hashtable_insert (prof
->method_table
, info
->method
, info
->method
);
4279 mono_os_mutex_unlock (&prof
->method_table_mutex
);
4281 char *name
= mono_method_full_name (info
->method
, 1);
4282 int nlen
= strlen (name
) + 1;
4283 void *cstart
= info
->ji
? mono_jit_info_get_code_start (info
->ji
) : NULL
;
4284 int csize
= info
->ji
? mono_jit_info_get_code_size (info
->ji
) : 0;
4286 ENTER_LOG (&method_jits_ctr
, logbuffer
,
4287 EVENT_SIZE
/* event */ +
4288 LEB128_SIZE
/* method */ +
4289 LEB128_SIZE
/* start */ +
4290 LEB128_SIZE
/* size */ +
4294 emit_event_time (logbuffer
, TYPE_JIT
| TYPE_METHOD
, info
->time
);
4295 emit_method_inner (logbuffer
, info
->method
);
4296 emit_ptr (logbuffer
, cstart
);
4297 emit_value (logbuffer
, csize
);
4299 memcpy (logbuffer
->cursor
, name
, nlen
);
4300 logbuffer
->cursor
+= nlen
;
4302 EXIT_LOG_EXPLICIT (NO_SEND
, NO_REQUESTS
);
4306 wrote_methods
= TRUE
;
4312 g_ptr_array_free (entry
->methods
, TRUE
);
4314 if (wrote_methods
) {
4315 dump_buffer_threadless (prof
, PROF_TLS_GET ()->buffer
);
4316 init_buffer_state (PROF_TLS_GET ());
4320 dump_buffer (prof
, entry
->buffer
);
4322 mono_thread_hazardous_try_free (entry
, free_writer_entry
);
4331 writer_thread (void *arg
)
4333 MonoProfiler
*prof
= (MonoProfiler
*)arg
;
4335 mono_threads_attach_tools_thread ();
4336 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");
4340 MonoProfilerThread
*thread
= init_thread (prof
, FALSE
);
4342 while (InterlockedRead (&prof
->run_writer_thread
)) {
4343 mono_os_sem_wait (&prof
->writer_queue_sem
, MONO_SEM_FLAGS_NONE
);
4344 handle_writer_queue_entry (prof
);
4347 /* Drain any remaining entries on shutdown. */
4348 while (handle_writer_queue_entry (prof
));
4350 free_buffer (thread
->buffer
, thread
->buffer
->size
);
4351 deinit_thread (thread
);
4353 mono_thread_info_detach ();
4359 start_writer_thread (MonoProfiler
* prof
)
4361 InterlockedWrite (&prof
->run_writer_thread
, 1);
4363 if (!mono_native_thread_create (&prof
->writer_thread
, writer_thread
, prof
)) {
4364 fprintf (stderr
, "Could not start writer thread\n");
4370 reuse_sample_hit (gpointer p
)
4372 SampleHit
*sample
= p
;
4374 mono_lock_free_queue_node_unpoison (&sample
->node
);
4375 mono_lock_free_queue_enqueue (&sample
->prof
->sample_reuse_queue
, &sample
->node
);
4379 handle_dumper_queue_entry (MonoProfiler
*prof
)
4383 if ((sample
= (SampleHit
*) mono_lock_free_queue_dequeue (&prof
->dumper_queue
))) {
4384 for (int i
= 0; i
< sample
->count
; ++i
) {
4385 MonoMethod
*method
= sample
->frames
[i
].method
;
4386 MonoDomain
*domain
= sample
->frames
[i
].domain
;
4387 void *address
= sample
->frames
[i
].base_address
;
4390 g_assert (domain
&& "What happened to the domain pointer?");
4391 g_assert (address
&& "What happened to the instruction pointer?");
4393 MonoJitInfo
*ji
= mono_jit_info_table_find (domain
, (char *) address
);
4396 sample
->frames
[i
].method
= mono_jit_info_get_method (ji
);
4400 ENTER_LOG (&sample_hits_ctr
, logbuffer
,
4401 EVENT_SIZE
/* event */ +
4402 BYTE_SIZE
/* type */ +
4403 LEB128_SIZE
/* tid */ +
4404 LEB128_SIZE
/* count */ +
4406 LEB128_SIZE
/* ip */
4408 LEB128_SIZE
/* managed count */ +
4410 LEB128_SIZE
/* method */
4414 emit_event_time (logbuffer
, TYPE_SAMPLE
| TYPE_SAMPLE_HIT
, sample
->time
);
4415 emit_byte (logbuffer
, SAMPLE_CYCLES
);
4416 emit_ptr (logbuffer
, (void *) sample
->tid
);
4417 emit_value (logbuffer
, 1);
4419 // TODO: Actual native unwinding.
4420 for (int i
= 0; i
< 1; ++i
) {
4421 emit_ptr (logbuffer
, sample
->ip
);
4422 add_code_pointer ((uintptr_t) sample
->ip
);
4425 /* new in data version 6 */
4426 emit_uvalue (logbuffer
, sample
->count
);
4428 for (int i
= 0; i
< sample
->count
; ++i
)
4429 emit_method (logbuffer
, sample
->frames
[i
].method
);
4431 EXIT_LOG_EXPLICIT (DO_SEND
, NO_REQUESTS
);
4433 mono_thread_hazardous_try_free (sample
, reuse_sample_hit
);
4435 dump_unmanaged_coderefs (prof
);
4442 dumper_thread (void *arg
)
4444 MonoProfiler
*prof
= (MonoProfiler
*)arg
;
4446 mono_threads_attach_tools_thread ();
4447 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");
4449 MonoProfilerThread
*thread
= init_thread (prof
, FALSE
);
4451 while (InterlockedRead (&prof
->run_dumper_thread
)) {
4452 mono_os_sem_wait (&prof
->dumper_queue_sem
, MONO_SEM_FLAGS_NONE
);
4453 handle_dumper_queue_entry (prof
);
4456 /* Drain any remaining entries on shutdown. */
4457 while (handle_dumper_queue_entry (prof
));
4459 send_log_unsafe (FALSE
);
4460 deinit_thread (thread
);
4462 mono_thread_info_detach ();
4468 start_dumper_thread (MonoProfiler
* prof
)
4470 InterlockedWrite (&prof
->run_dumper_thread
, 1);
4472 if (!mono_native_thread_create (&prof
->dumper_thread
, dumper_thread
, prof
)) {
4473 fprintf (stderr
, "Could not start dumper thread\n");
4479 register_counter (const char *name
, gint32
*counter
)
4481 mono_counters_register (name
, MONO_COUNTER_UINT
| MONO_COUNTER_PROFILER
| MONO_COUNTER_MONOTONIC
, counter
);
4485 runtime_initialized (MonoProfiler
*profiler
)
4487 InterlockedWrite (&runtime_inited
, 1);
4489 register_counter ("Sample events allocated", &sample_allocations_ctr
);
4490 register_counter ("Log buffers allocated", &buffer_allocations_ctr
);
4492 register_counter ("Event: Sync points", &sync_points_ctr
);
4493 register_counter ("Event: Heap objects", &heap_objects_ctr
);
4494 register_counter ("Event: Heap starts", &heap_starts_ctr
);
4495 register_counter ("Event: Heap ends", &heap_ends_ctr
);
4496 register_counter ("Event: Heap roots", &heap_roots_ctr
);
4497 register_counter ("Event: GC events", &gc_events_ctr
);
4498 register_counter ("Event: GC resizes", &gc_resizes_ctr
);
4499 register_counter ("Event: GC allocations", &gc_allocs_ctr
);
4500 register_counter ("Event: GC moves", &gc_moves_ctr
);
4501 register_counter ("Event: GC handle creations", &gc_handle_creations_ctr
);
4502 register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr
);
4503 register_counter ("Event: GC finalize starts", &finalize_begins_ctr
);
4504 register_counter ("Event: GC finalize ends", &finalize_ends_ctr
);
4505 register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr
);
4506 register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr
);
4507 register_counter ("Event: Image loads", &image_loads_ctr
);
4508 register_counter ("Event: Image unloads", &image_unloads_ctr
);
4509 register_counter ("Event: Assembly loads", &assembly_loads_ctr
);
4510 register_counter ("Event: Assembly unloads", &assembly_unloads_ctr
);
4511 register_counter ("Event: Class loads", &class_loads_ctr
);
4512 register_counter ("Event: Class unloads", &class_unloads_ctr
);
4513 register_counter ("Event: Method entries", &method_entries_ctr
);
4514 register_counter ("Event: Method exits", &method_exits_ctr
);
4515 register_counter ("Event: Method exception leaves", &method_exception_exits_ctr
);
4516 register_counter ("Event: Method JITs", &method_jits_ctr
);
4517 register_counter ("Event: Code buffers", &code_buffers_ctr
);
4518 register_counter ("Event: Exception throws", &exception_throws_ctr
);
4519 register_counter ("Event: Exception clauses", &exception_clauses_ctr
);
4520 register_counter ("Event: Monitor contentions", &monitor_contentions_ctr
);
4521 register_counter ("Event: Monitor acquisitions", &monitor_acquisitions_ctr
);
4522 register_counter ("Event: Monitor failures", &monitor_failures_ctr
);
4523 register_counter ("Event: Thread starts", &thread_starts_ctr
);
4524 register_counter ("Event: Thread ends", &thread_ends_ctr
);
4525 register_counter ("Event: Thread names", &thread_names_ctr
);
4526 register_counter ("Event: Domain loads", &domain_loads_ctr
);
4527 register_counter ("Event: Domain unloads", &domain_unloads_ctr
);
4528 register_counter ("Event: Domain names", &domain_names_ctr
);
4529 register_counter ("Event: Context loads", &context_loads_ctr
);
4530 register_counter ("Event: Context unloads", &context_unloads_ctr
);
4531 register_counter ("Event: Sample binaries", &sample_ubins_ctr
);
4532 register_counter ("Event: Sample symbols", &sample_usyms_ctr
);
4533 register_counter ("Event: Sample hits", &sample_hits_ctr
);
4534 register_counter ("Event: Counter descriptors", &counter_descriptors_ctr
);
4535 register_counter ("Event: Counter samples", &counter_samples_ctr
);
4536 register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr
);
4537 register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr
);
4538 register_counter ("Event: Coverage methods", &coverage_methods_ctr
);
4539 register_counter ("Event: Coverage statements", &coverage_statements_ctr
);
4540 register_counter ("Event: Coverage classes", &coverage_classes_ctr
);
4541 register_counter ("Event: Coverage assemblies", &coverage_assemblies_ctr
);
4543 counters_init (profiler
);
4546 * We must start the helper thread before the writer thread. This is
4547 * because the helper thread sets up the command port which is written to
4548 * the log header by the writer thread.
4550 start_helper_thread (profiler
);
4551 start_writer_thread (profiler
);
4552 start_dumper_thread (profiler
);
4555 static MonoProfiler
*
4556 create_profiler (const char *args
, const char *filename
, GPtrArray
*filters
)
4560 int force_delete
= 0;
4561 prof
= (MonoProfiler
*) g_calloc (1, sizeof (MonoProfiler
));
4563 prof
->args
= pstrdup (args
);
4564 prof
->command_port
= command_port
;
4565 if (filename
&& *filename
== '-') {
4571 filename
= "|mprof-report -";
4573 filename
= "output.mlpd";
4574 nf
= (char*)filename
;
4576 nf
= new_filename (filename
);
4578 int s
= strlen (nf
) + 32;
4579 char *p
= (char *) g_malloc (s
);
4580 snprintf (p
, s
, "|mprof-report '--out=%s' -", nf
);
4586 prof
->file
= popen (nf
+ 1, "w");
4587 prof
->pipe_output
= 1;
4588 } else if (*nf
== '#') {
4589 int fd
= strtol (nf
+ 1, NULL
, 10);
4590 prof
->file
= fdopen (fd
, "a");
4594 prof
->file
= fopen (nf
, "wb");
4597 fprintf (stderr
, "Cannot create profiler output: %s\n", nf
);
4601 #if defined (HAVE_SYS_ZLIB)
4603 prof
->gzfile
= gzdopen (fileno (prof
->file
), "wb");
4607 * If you hit this assert while increasing MAX_FRAMES, you need to increase
4608 * SAMPLE_BLOCK_SIZE as well.
4610 g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES
) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE
));
4612 // FIXME: We should free this stuff too.
4613 mono_lock_free_allocator_init_size_class (&prof
->sample_size_class
, SAMPLE_SLOT_SIZE (num_frames
), SAMPLE_BLOCK_SIZE
);
4614 mono_lock_free_allocator_init_allocator (&prof
->sample_allocator
, &prof
->sample_size_class
, MONO_MEM_ACCOUNT_PROFILER
);
4616 mono_lock_free_queue_init (&prof
->sample_reuse_queue
);
4618 g_assert (sizeof (WriterQueueEntry
) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE
));
4620 // FIXME: We should free this stuff too.
4621 mono_lock_free_allocator_init_size_class (&prof
->writer_entry_size_class
, sizeof (WriterQueueEntry
), WRITER_ENTRY_BLOCK_SIZE
);
4622 mono_lock_free_allocator_init_allocator (&prof
->writer_entry_allocator
, &prof
->writer_entry_size_class
, MONO_MEM_ACCOUNT_PROFILER
);
4624 mono_lock_free_queue_init (&prof
->writer_queue
);
4625 mono_os_sem_init (&prof
->writer_queue_sem
, 0);
4627 mono_lock_free_queue_init (&prof
->dumper_queue
);
4628 mono_os_sem_init (&prof
->dumper_queue_sem
, 0);
4630 mono_os_mutex_init (&prof
->method_table_mutex
);
4631 prof
->method_table
= mono_conc_hashtable_new (NULL
, NULL
);
4634 coverage_init (prof
);
4635 prof
->coverage_filters
= filters
;
4637 prof
->startup_time
= current_time ();
4644 printf ("Log profiler version %d.%d (format: %d)\n", LOG_VERSION_MAJOR
, LOG_VERSION_MINOR
, LOG_DATA_VERSION
);
4645 printf ("Usage: mono --profile=log[:OPTION1[,OPTION2...]] program.exe\n");
4646 printf ("Options:\n");
4647 printf ("\thelp show this usage info\n");
4648 printf ("\t[no]alloc enable/disable recording allocation info\n");
4649 printf ("\t[no]calls enable/disable recording enter/leave method events\n");
4650 printf ("\theapshot[=MODE] record heap shot info (by default at each major collection)\n");
4651 printf ("\t MODE: every XXms milliseconds, every YYgc collections, ondemand\n");
4652 printf ("\tcounters sample counters every 1s\n");
4653 printf ("\tsample[=TYPE] use statistical sampling mode (by default cycles/100)\n");
4654 printf ("\t TYPE: cycles,instr,cacherefs,cachemiss,branches,branchmiss\n");
4655 printf ("\t TYPE can be followed by /FREQUENCY\n");
4656 printf ("\tmaxframes=NUM collect up to NUM stack frames\n");
4657 printf ("\tcalldepth=NUM ignore method events for call chain depth bigger than NUM\n");
4658 printf ("\toutput=FILENAME write the data to file FILENAME (-FILENAME to overwrite)\n");
4659 printf ("\toutput=|PROGRAM write the data to the stdin of PROGRAM\n");
4660 printf ("\t %%t is subtituted with date and time, %%p with the pid\n");
4661 printf ("\treport create a report instead of writing the raw data to a file\n");
4662 printf ("\tzip compress the output data\n");
4663 printf ("\tport=PORTNUM use PORTNUM for the listening command server\n");
4664 printf ("\tcoverage enable collection of code coverage data\n");
4665 printf ("\tcovfilter=ASSEMBLY add an assembly to the code coverage filters\n");
4666 printf ("\t add a + to include the assembly or a - to exclude it\n");
4667 printf ("\t filter=-mscorlib\n");
4668 printf ("\tcovfilter-file=FILE use FILE to generate the list of assemblies to be filtered\n");
4674 match_option (const char* p
, const char *opt
, char **rval
)
4676 int len
= strlen (opt
);
4677 if (strncmp (p
, opt
, len
) == 0) {
4679 if (p
[len
] == '=' && p
[len
+ 1]) {
4680 const char *opt
= p
+ len
+ 1;
4681 const char *end
= strchr (opt
, ',');
4689 val
= (char *) g_malloc (l
+ 1);
4690 memcpy (val
, opt
, l
);
4695 if (p
[len
] == 0 || p
[len
] == ',') {
4697 return p
+ len
+ (p
[len
] == ',');
4711 set_sample_freq (char *val
)
4721 // Is it only the frequency (new option style)?
4725 // Skip the sample type for backwards compatibility.
4726 while (isalpha (*p
))
4729 // Skip the forward slash only if we got a sample type.
4730 if (p
!= val
&& *p
== '/') {
4736 sample_freq
= strtoul (p
, &end
, 10);
4751 set_hsmode (char* val
, int allow_empty
)
4755 if (allow_empty
&& !val
)
4757 if (strcmp (val
, "ondemand") == 0) {
4758 hs_mode_ondemand
= 1;
4762 count
= strtoul (val
, &end
, 10);
4765 if (strcmp (end
, "ms") == 0)
4767 else if (strcmp (end
, "gc") == 0)
4775 * declaration to silence the compiler: this is the entry point that
4776 * mono will load from the shared library and call.
4779 mono_profiler_startup (const char *desc
);
4782 mono_profiler_startup_log (const char *desc
);
4785 * this is the entry point that will be used when the profiler
4786 * is embedded inside the main executable.
4789 mono_profiler_startup_log (const char *desc
)
4791 mono_profiler_startup (desc
);
4795 mono_profiler_startup (const char *desc
)
4798 GPtrArray
*filters
= NULL
;
4799 char *filename
= NULL
;
4802 int calls_enabled
= 0;
4803 int allocs_enabled
= 0;
4804 int events
= MONO_PROFILE_GC
|MONO_PROFILE_ALLOCATIONS
|
4805 MONO_PROFILE_GC_MOVES
|MONO_PROFILE_CLASS_EVENTS
|MONO_PROFILE_THREADS
|
4806 MONO_PROFILE_ENTER_LEAVE
|MONO_PROFILE_JIT_COMPILATION
|MONO_PROFILE_EXCEPTIONS
|
4807 MONO_PROFILE_MONITOR_EVENTS
|MONO_PROFILE_MODULE_EVENTS
|MONO_PROFILE_GC_ROOTS
|
4808 MONO_PROFILE_INS_COVERAGE
|MONO_PROFILE_APPDOMAIN_EVENTS
|MONO_PROFILE_CONTEXT_EVENTS
|
4809 MONO_PROFILE_ASSEMBLY_EVENTS
|MONO_PROFILE_GC_FINALIZATION
;
4811 max_allocated_sample_hits
= mono_cpu_count () * 1000;
4814 if (strncmp (p
, "log", 3))
4819 for (; *p
; p
= opt
) {
4825 if ((opt
= match_option (p
, "help", NULL
)) != p
) {
4829 if ((opt
= match_option (p
, "calls", NULL
)) != p
) {
4833 if ((opt
= match_option (p
, "nocalls", NULL
)) != p
) {
4834 events
&= ~MONO_PROFILE_ENTER_LEAVE
;
4838 if ((opt
= match_option (p
, "alloc", NULL
)) != p
) {
4842 if ((opt
= match_option (p
, "noalloc", NULL
)) != p
) {
4843 events
&= ~MONO_PROFILE_ALLOCATIONS
;
4844 events
&= ~MONO_PROFILE_GC_MOVES
;
4847 if ((opt
= match_option (p
, "nocounters", NULL
)) != p
) {
4851 if ((opt
= match_option (p
, "time", &val
)) != p
) {
4852 // For backwards compatibility.
4853 if (strcmp (val
, "fast") && strcmp (val
, "null"))
4858 if ((opt
= match_option (p
, "report", NULL
)) != p
) {
4862 if ((opt
= match_option (p
, "debug", NULL
)) != p
) {
4866 if ((opt
= match_option (p
, "sampling-real", NULL
)) != p
) {
4867 sampling_mode
= MONO_PROFILER_STAT_MODE_REAL
;
4870 if ((opt
= match_option (p
, "sampling-process", NULL
)) != p
) {
4871 sampling_mode
= MONO_PROFILER_STAT_MODE_PROCESS
;
4874 if ((opt
= match_option (p
, "heapshot", &val
)) != p
) {
4875 events
&= ~MONO_PROFILE_ALLOCATIONS
;
4876 events
&= ~MONO_PROFILE_GC_MOVES
;
4877 events
&= ~MONO_PROFILE_ENTER_LEAVE
;
4880 set_hsmode (val
, 1);
4883 if ((opt
= match_option (p
, "sample", &val
)) != p
) {
4884 events
&= ~MONO_PROFILE_ALLOCATIONS
;
4885 events
&= ~MONO_PROFILE_GC_MOVES
;
4886 events
&= ~MONO_PROFILE_ENTER_LEAVE
;
4888 set_sample_freq (val
);
4891 if ((opt
= match_option (p
, "zip", NULL
)) != p
) {
4895 if ((opt
= match_option (p
, "output", &val
)) != p
) {
4899 if ((opt
= match_option (p
, "port", &val
)) != p
) {
4901 command_port
= strtoul (val
, &end
, 10);
4905 if ((opt
= match_option (p
, "maxframes", &val
)) != p
) {
4907 num_frames
= strtoul (val
, &end
, 10);
4908 if (num_frames
> MAX_FRAMES
)
4909 num_frames
= MAX_FRAMES
;
4911 notraces
= num_frames
== 0;
4914 if ((opt
= match_option (p
, "maxsamples", &val
)) != p
) {
4916 max_allocated_sample_hits
= strtoul (val
, &end
, 10);
4917 if (!max_allocated_sample_hits
)
4918 max_allocated_sample_hits
= G_MAXINT32
;
4922 if ((opt
= match_option (p
, "calldepth", &val
)) != p
) {
4924 max_call_depth
= strtoul (val
, &end
, 10);
4928 if ((opt
= match_option (p
, "counters", NULL
)) != p
) {
4929 // For backwards compatibility.
4932 if ((opt
= match_option (p
, "coverage", NULL
)) != p
) {
4934 events
|= MONO_PROFILE_ENTER_LEAVE
;
4935 debug_coverage
= (g_getenv ("MONO_PROFILER_DEBUG_COVERAGE") != NULL
);
4938 if ((opt
= match_option (p
, "onlycoverage", NULL
)) != p
) {
4939 only_coverage
= TRUE
;
4942 if ((opt
= match_option (p
, "covfilter-file", &val
)) != p
) {
4944 char *line
, *content
;
4946 if (filters
== NULL
)
4947 filters
= g_ptr_array_new ();
4949 filter_file
= fopen (val
, "r");
4950 if (filter_file
== NULL
) {
4951 fprintf (stderr
, "Unable to open %s\n", val
);
4955 /* Don't need to free content as it is referred to by the lines stored in @filters */
4956 content
= get_file_content (filter_file
);
4957 if (content
== NULL
)
4958 fprintf (stderr
, "WARNING: %s is greater than 128kb - ignoring\n", val
);
4960 while ((line
= get_next_line (content
, &content
)))
4961 g_ptr_array_add (filters
, g_strchug (g_strchomp (line
)));
4963 fclose (filter_file
);
4966 if ((opt
= match_option (p
, "covfilter", &val
)) != p
) {
4967 if (filters
== NULL
)
4968 filters
= g_ptr_array_new ();
4970 g_ptr_array_add (filters
, val
);
4979 if (calls_enabled
) {
4980 events
|= MONO_PROFILE_ENTER_LEAVE
;
4984 if (allocs_enabled
) {
4985 events
|= MONO_PROFILE_ALLOCATIONS
;
4986 events
|= MONO_PROFILE_GC_MOVES
;
4989 // Only activate the bare minimum events the profiler needs to function.
4990 if (only_coverage
) {
4992 fprintf (stderr
, "The onlycoverage option is only valid when paired with the coverage option\n");
4997 events
= MONO_PROFILE_GC
| MONO_PROFILE_THREADS
| MONO_PROFILE_ENTER_LEAVE
| MONO_PROFILE_INS_COVERAGE
;
5004 prof
= create_profiler (desc
, filename
, filters
);
5010 mono_lls_init (&profiler_thread_list
, NULL
);
5012 init_thread (prof
, TRUE
);
5014 mono_profiler_install (prof
, log_shutdown
);
5015 mono_profiler_install_gc (gc_event
, gc_resize
);
5016 mono_profiler_install_allocation (gc_alloc
);
5017 mono_profiler_install_gc_moves (gc_moves
);
5018 mono_profiler_install_gc_roots (gc_handle
, gc_roots
);
5019 mono_profiler_install_gc_finalize (finalize_begin
, finalize_object_begin
, finalize_object_end
, finalize_end
);
5020 mono_profiler_install_appdomain (NULL
, domain_loaded
, domain_unloaded
, NULL
);
5021 mono_profiler_install_appdomain_name (domain_name
);
5022 mono_profiler_install_context (context_loaded
, context_unloaded
);
5023 mono_profiler_install_class (NULL
, class_loaded
, class_unloaded
, NULL
);
5024 mono_profiler_install_module (NULL
, image_loaded
, image_unloaded
, NULL
);
5025 mono_profiler_install_assembly (NULL
, assembly_loaded
, assembly_unloaded
, NULL
);
5026 mono_profiler_install_thread (thread_start
, thread_end
);
5027 mono_profiler_install_thread_name (thread_name
);
5028 mono_profiler_install_enter_leave (method_enter
, method_leave
);
5029 mono_profiler_install_jit_end (method_jitted
);
5030 mono_profiler_install_code_buffer_new (code_buffer_new
);
5031 mono_profiler_install_exception (throw_exc
, method_exc_leave
, clause_exc
);
5032 mono_profiler_install_monitor (monitor_event
);
5033 mono_profiler_install_runtime_initialized (runtime_initialized
);
5035 mono_profiler_install_coverage_filter (coverage_filter
);
5037 if (do_mono_sample
&& sample_freq
) {
5038 events
|= MONO_PROFILE_STATISTICAL
;
5039 mono_profiler_set_statistical_mode (sampling_mode
, sample_freq
);
5040 mono_profiler_install_statistical (mono_sample_hit
);
5043 mono_profiler_set_events ((MonoProfileFlags
)events
);