/*
 * log.c: mono log profiler
 *
 * Authors:
 * Paolo Molaro (lupus@ximian.com)
 * Alex Rønne Petersen (alexrp@xamarin.com)
 *
 * Copyright 2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <config.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/mono-config.h>
#include <mono/metadata/mono-gc.h>
#include <mono/metadata/mono-perfcounters.h>
#include <mono/metadata/tabledefs.h>
#include <mono/mini/jit.h>
#include <mono/utils/atomic.h>
#include <mono/utils/hazard-pointer.h>
#include <mono/utils/lock-free-alloc.h>
#include <mono/utils/lock-free-queue.h>
#include <mono/utils/mono-conc-hashtable.h>
#include <mono/utils/mono-coop-mutex.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/utils/mono-linked-list-set.h>
#include <mono/utils/mono-membar.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-os-mutex.h>
#include <mono/utils/mono-os-semaphore.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-threads-api.h>
#include "log.h"

#if defined(__APPLE__)
#include <mach/mach_time.h>
#endif
#include <netinet/in.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <sys/socket.h>
#if defined (HAVE_SYS_ZLIB)
#include <zlib.h>
#endif

// Statistics for internal profiler data structures.
static gint32 sample_allocations_ctr,
	buffer_allocations_ctr;

// Statistics for profiler events.
static gint32 sync_points_ctr,
	heap_objects_ctr,
	heap_starts_ctr,
	heap_ends_ctr,
	heap_roots_ctr,
	gc_events_ctr,
	gc_resizes_ctr,
	gc_allocs_ctr,
	gc_moves_ctr,
	gc_handle_creations_ctr,
	gc_handle_deletions_ctr,
	finalize_begins_ctr,
	finalize_ends_ctr,
	finalize_object_begins_ctr,
	finalize_object_ends_ctr,
	image_loads_ctr,
	image_unloads_ctr,
	assembly_loads_ctr,
	assembly_unloads_ctr,
	class_loads_ctr,
	vtable_loads_ctr,
	method_entries_ctr,
	method_exits_ctr,
	method_exception_exits_ctr,
	code_buffers_ctr,
	exception_throws_ctr,
	exception_clauses_ctr,
	monitor_events_ctr,
	thread_starts_ctr,
	thread_ends_ctr,
	thread_names_ctr,
	domain_loads_ctr,
	domain_unloads_ctr,
	domain_names_ctr,
	context_loads_ctr,
	context_unloads_ctr,
	sample_ubins_ctr,
	sample_usyms_ctr,
	counter_descriptors_ctr,
	perfcounter_descriptors_ctr,
	perfcounter_samples_ctr,
	coverage_methods_ctr,
	coverage_statements_ctr,
	coverage_classes_ctr,
	coverage_assemblies_ctr;
// Pending data to be written to the log, for a single thread.
// Threads periodically flush their own LogBuffers by calling safe_send ().
typedef struct _LogBuffer LogBuffer;
struct _LogBuffer {
	// Next (older) LogBuffer in the processing queue.
	LogBuffer *next;

	uint64_t time_base;
	uint64_t last_time;
	gboolean has_ptr_base;
	uintptr_t ptr_base;
	uintptr_t method_base;
	uintptr_t last_method;
	uintptr_t obj_base;
	uintptr_t thread_id;

	// Bytes allocated for this LogBuffer.
	int size;

	// Start of currently unused space in buffer.
	unsigned char* cursor;

	// Pointer to start-of-structure-plus-size (for convenience).
	unsigned char* buf_end;

	// Start of data in buffer. Contents follow "buffer format" described above.
	unsigned char buf [1];
};

typedef struct {
	MonoLinkedListSetNode node;

	// Was this thread added to the LLS?
	gboolean attached;

	// The current log buffer for this thread.
	LogBuffer *buffer;

	// Methods referenced by events in `buffer`, see `MethodInfo`.
	GPtrArray *methods;

	// Current call depth for enter/leave events.
	int call_depth;

	// Indicates whether this thread is currently writing to its `buffer`.
	gboolean busy;

	// Has this thread written a thread end event to `buffer`?
	gboolean ended;

	// Stored in `buffer_lock_state` to take the exclusive lock.
	int small_id;
} MonoProfilerThread;
// Default value in `profiler_tls` for new threads.
#define MONO_PROFILER_THREAD_ZERO ((MonoProfilerThread *) NULL)

// This is written to `profiler_tls` to indicate that a thread has stopped.
#define MONO_PROFILER_THREAD_DEAD ((MonoProfilerThread *) -1)
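
/*
 * For reference, the TLS value for a given thread moves through three
 * states: MONO_PROFILER_THREAD_ZERO (never initialized), then a live
 * MonoProfilerThread pointer (see init_thread ()), and finally
 * MONO_PROFILER_THREAD_DEAD once the thread has stopped.
 */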
// Do not use these TLS macros directly unless you know what you're doing.

#ifdef HOST_WIN32

#define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
#define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
#define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
#define PROF_TLS_FREE() (TlsFree (profiler_tls))

static DWORD profiler_tls;

#elif HAVE_KW_THREAD

#define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
#define PROF_TLS_GET() (profiler_tls)
#define PROF_TLS_INIT()
#define PROF_TLS_FREE()

static __thread MonoProfilerThread *profiler_tls;

#else

#define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
#define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
#define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
#define PROF_TLS_FREE() (pthread_key_delete (profiler_tls))

static pthread_key_t profiler_tls;

#endif

static uintptr_t
thread_id (void)
{
	return (uintptr_t) mono_native_thread_id_get ();
}

static uintptr_t
process_id (void)
{
#ifdef HOST_WIN32
	return (uintptr_t) GetCurrentProcessId ();
#else
	return (uintptr_t) getpid ();
#endif
}

#define ENABLED(EVT) (!!(log_config.effective_mask & (EVT)))
#define ENABLE(EVT) do { log_config.effective_mask |= (EVT); } while (0)
#define DISABLE(EVT) do { log_config.effective_mask &= ~(EVT); } while (0)

/*
 * These macros should be used when writing an event to a log buffer. They
 * take care of a bunch of stuff that can be repetitive and error-prone, such
 * as attaching the current thread, acquiring/releasing the buffer lock,
 * incrementing the event counter, expanding the log buffer, etc. They also
 * create a scope so that it's harder to leak the LogBuffer pointer, which can
 * be problematic as the pointer is unstable when the buffer lock isn't held.
 *
 * If the calling thread is already attached, these macros will not alter its
 * attach mode (i.e. whether it's added to the LLS). If the thread is not
 * attached, init_thread () will be called with add_to_lls = TRUE.
 */
#define ENTER_LOG(COUNTER, BUFFER, SIZE) \
	do { \
		MonoProfilerThread *thread__ = get_thread (); \
		g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
		thread__->busy = TRUE; \
		mono_atomic_inc_i32 ((COUNTER)); \
		if (thread__->attached) \
			buffer_lock (); \
		LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))

#define EXIT_LOG_EXPLICIT(SEND) \
		if ((SEND)) \
			send_log_unsafe (TRUE); \
		if (thread__->attached) \
			buffer_unlock (); \
		thread__->busy = FALSE; \
	} while (0)

// Pass these to EXIT_LOG_EXPLICIT () for easier reading.
#define DO_SEND TRUE
#define NO_SEND FALSE

#define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND)
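
/*
 * As an illustration (not code from elsewhere in this file), a minimal
 * event emitter built on these macros would look like the following,
 * assuming a hypothetical counter `my_events_ctr` and a single pointer
 * payload:
 *
 *     ENTER_LOG (&my_events_ctr, logbuffer,
 *         EVENT_SIZE + // event
 *         LEB128_SIZE  // payload
 *     );
 *
 *     emit_event (logbuffer, TYPE_META);
 *     emit_ptr (logbuffer, payload);
 *
 *     EXIT_LOG;
 *
 * ENTER_LOG reserves a worst-case byte count up front; the emit_* calls
 * must not write more than was reserved.
 */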
typedef struct _BinaryObject BinaryObject;
struct _BinaryObject {
	BinaryObject *next;
	void *addr;
	char *name;
};

typedef struct MonoCounterAgent {
	MonoCounter *counter;
	// MonoCounterAgent specific data:
	void *value;
	size_t value_size;
	guint32 index;
	gboolean emitted;
	struct MonoCounterAgent *next;
} MonoCounterAgent;

typedef struct _PerfCounterAgent PerfCounterAgent;
struct _PerfCounterAgent {
	PerfCounterAgent *next;
	char *category_name;
	char *name;
	gint64 value;
	guint32 index;
	gboolean emitted;
	gboolean updated;
	gboolean deleted;
};

struct _MonoProfiler {
	MonoProfilerHandle handle;

	FILE* file;
#if defined (HAVE_SYS_ZLIB)
	gzFile gzfile;
#endif

	char *args;
	uint64_t startup_time;
	int timer_overhead;

#if defined (__APPLE__)
	mach_timebase_info_data_t timebase_info;
#elif defined (HOST_WIN32)
	LARGE_INTEGER pcounter_freq;
#endif

	int command_port;

	MonoLinkedListSet profiler_thread_list;
	volatile gint32 buffer_lock_state;
	volatile gint32 buffer_lock_exclusive_intent;

	volatile gint32 runtime_inited;
	volatile gint32 in_shutdown;

	MonoNativeThreadId helper_thread;

	MonoNativeThreadId writer_thread;
	volatile gint32 run_writer_thread;
	MonoLockFreeQueue writer_queue;
	MonoSemType writer_queue_sem;

	MonoLockFreeAllocSizeClass writer_entry_size_class;
	MonoLockFreeAllocator writer_entry_allocator;

	MonoConcurrentHashTable *method_table;
	mono_mutex_t method_table_mutex;

	MonoNativeThreadId dumper_thread;
	volatile gint32 run_dumper_thread;
	MonoLockFreeQueue dumper_queue;
	MonoSemType dumper_queue_sem;

	MonoLockFreeAllocSizeClass sample_size_class;
	MonoLockFreeAllocator sample_allocator;
	MonoLockFreeQueue sample_reuse_queue;

	BinaryObject *binary_objects;

	volatile gint32 heapshot_requested;
	guint64 gc_count;
	guint64 last_hs_time;
	gboolean do_heap_walk;

	mono_mutex_t counters_mutex;
	MonoCounterAgent *counters;
	PerfCounterAgent *perfcounters;
	guint32 counters_index;

	mono_mutex_t coverage_mutex;
	GPtrArray *coverage_data;

	GPtrArray *coverage_filters;
	MonoConcurrentHashTable *coverage_filtered_classes;
	MonoConcurrentHashTable *coverage_suppressed_assemblies;

	MonoConcurrentHashTable *coverage_methods;
	MonoConcurrentHashTable *coverage_assemblies;
	MonoConcurrentHashTable *coverage_classes;

	MonoConcurrentHashTable *coverage_image_to_methods;

	guint32 coverage_previous_offset;
	guint32 coverage_method_id;

	MonoCoopMutex api_mutex;
};
static ProfilerConfig log_config;
static struct _MonoProfiler log_profiler;

typedef struct {
	MonoLockFreeQueueNode node;
	GPtrArray *methods;
	LogBuffer *buffer;
} WriterQueueEntry;

#define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())

#define TICKS_PER_SEC 1000000000LL
static uint64_t
current_time (void)
{
#if defined (__APPLE__)
	uint64_t time = mach_absolute_time ();

	time *= log_profiler.timebase_info.numer;
	time /= log_profiler.timebase_info.denom;

	return time;
#elif defined (HOST_WIN32)
	LARGE_INTEGER value;

	QueryPerformanceCounter (&value);

	return value.QuadPart * TICKS_PER_SEC / log_profiler.pcounter_freq.QuadPart;
#elif defined (CLOCK_MONOTONIC)
	struct timespec tspec;

	clock_gettime (CLOCK_MONOTONIC, &tspec);

	return ((uint64_t) tspec.tv_sec * TICKS_PER_SEC + tspec.tv_nsec);
#else
	struct timeval tv;

	gettimeofday (&tv, NULL);

	return ((uint64_t) tv.tv_sec * TICKS_PER_SEC + tv.tv_usec * 1000);
#endif
}
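
/*
 * All branches above normalize to nanoseconds. For example, on Windows a
 * QueryPerformanceCounter frequency of 10,000,000 (an illustrative value,
 * not a guarantee) makes each counter tick worth TICKS_PER_SEC / 10000000 =
 * 100 ns.
 */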
static void
init_time (void)
{
#if defined (__APPLE__)
	mach_timebase_info (&log_profiler.timebase_info);
#elif defined (HOST_WIN32)
	QueryPerformanceFrequency (&log_profiler.pcounter_freq);
#endif

	uint64_t time_start = current_time ();

	for (int i = 0; i < 256; ++i)
		current_time ();

	uint64_t time_end = current_time ();

	log_profiler.timer_overhead = (time_end - time_start) / 256;
}

static char*
pstrdup (const char *s)
{
	int len = strlen (s) + 1;
	char *p = (char *) g_malloc (len);
	memcpy (p, s, len);
	return p;
}
#define BUFFER_SIZE (4096 * 16)

/* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
#define LEB128_SIZE 10

/* Size of a value encoded as a single byte. */
#undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
#define BYTE_SIZE 1

/* Size in bytes of the event prefix (ID + time). */
#define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
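
/*
 * Worked example: an event carrying one pointer payload reserves
 * EVENT_SIZE + LEB128_SIZE = (1 + 10) + 10 = 21 bytes, even though the
 * LEB128-encoded time delta and pointer will usually occupy far fewer.
 */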
static void*
alloc_buffer (int size)
{
	return mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_ANON | MONO_MMAP_PRIVATE, MONO_MEM_ACCOUNT_PROFILER);
}

static void
free_buffer (void *buf, int size)
{
	mono_vfree (buf, size, MONO_MEM_ACCOUNT_PROFILER);
}

static LogBuffer*
create_buffer (uintptr_t tid, int bytes)
{
	LogBuffer* buf = (LogBuffer *) alloc_buffer (MAX (BUFFER_SIZE, bytes));

	mono_atomic_inc_i32 (&buffer_allocations_ctr);

	buf->size = BUFFER_SIZE;
	buf->time_base = current_time ();
	buf->last_time = buf->time_base;
	buf->buf_end = (unsigned char *) buf + buf->size;
	buf->cursor = buf->buf;
	buf->thread_id = tid;

	return buf;
}
/*
 * Must be called with the reader lock held if thread is the current thread, or
 * the exclusive lock if thread is a different thread. However, if thread is
 * the current thread, and init_thread () was called with add_to_lls = FALSE,
 * then no locking is necessary.
 */
static void
init_buffer_state (MonoProfilerThread *thread)
{
	thread->buffer = create_buffer (thread->node.key, 0);
	thread->methods = NULL;
}

static void
clear_hazard_pointers (MonoThreadHazardPointers *hp)
{
	mono_hazard_pointer_clear (hp, 0);
	mono_hazard_pointer_clear (hp, 1);
	mono_hazard_pointer_clear (hp, 2);
}

static MonoProfilerThread *
init_thread (gboolean add_to_lls)
{
	MonoProfilerThread *thread = PROF_TLS_GET ();

	g_assert (thread != MONO_PROFILER_THREAD_DEAD && "Why are we trying to resurrect a stopped thread?");

	/*
	 * Sometimes we may try to initialize a thread twice. One example is the
	 * main thread: We initialize it when setting up the profiler, but we will
	 * also get a thread_start () callback for it. Another example is when
	 * attaching new threads to the runtime: We may get a gc_alloc () callback
	 * for that thread's thread object (where we initialize it), soon followed
	 * by a thread_start () callback.
	 *
	 * These cases are harmless anyhow. Just return if we've already done the
	 * initialization work.
	 */
	if (thread != MONO_PROFILER_THREAD_ZERO)
		return thread;

	thread = g_malloc (sizeof (MonoProfilerThread));
	thread->node.key = thread_id ();
	thread->attached = add_to_lls;
	thread->call_depth = 0;
	thread->busy = FALSE;
	thread->ended = FALSE;

	init_buffer_state (thread);

	thread->small_id = mono_thread_info_register_small_id ();

	/*
	 * Some internal profiler threads don't need to be cleaned up
	 * by the main thread on shutdown.
	 */
	if (add_to_lls) {
		MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
		g_assert (mono_lls_insert (&log_profiler.profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
		clear_hazard_pointers (hp);
	}

	PROF_TLS_SET (thread);

	return thread;
}

// Only valid if init_thread () was called with add_to_lls = FALSE.
static void
deinit_thread (MonoProfilerThread *thread)
{
	g_assert (!thread->attached && "Why are we manually freeing an attached thread?");

	g_free (thread);
	PROF_TLS_SET (MONO_PROFILER_THREAD_DEAD);
}

static MonoProfilerThread *
get_thread (void)
{
	return init_thread (TRUE);
}

// Only valid if init_thread () was called with add_to_lls = FALSE.
static LogBuffer *
ensure_logbuf_unsafe (MonoProfilerThread *thread, int bytes)
{
	LogBuffer *old = thread->buffer;

	if (old->cursor + bytes < old->buf_end)
		return old;

	LogBuffer *new_ = create_buffer (thread->node.key, bytes);
	new_->next = old;
	thread->buffer = new_;

	return new_;
}
/*
 * This is a reader/writer spin lock of sorts used to protect log buffers.
 * When a thread modifies its own log buffer, it increments the reader
 * count. When a thread wants to access log buffers of other threads, it
 * takes the exclusive lock.
 *
 * `buffer_lock_state` holds the reader count in its lower 16 bits, and
 * the small ID of the thread currently holding the exclusive (writer)
 * lock in its upper 16 bits. Both can be zero. It's important that the
 * whole lock state is a single word that can be read/written atomically
 * to avoid race conditions where there could end up being readers while
 * the writer lock is held.
 *
 * The lock is writer-biased. When a thread wants to take the exclusive
 * lock, it increments `buffer_lock_exclusive_intent` which will make new
 * readers spin until it's back to zero, then takes the exclusive lock
 * once the reader count has reached zero. After releasing the exclusive
 * lock, it decrements `buffer_lock_exclusive_intent`, which, when it
 * reaches zero again, allows readers to increment the reader count.
 *
 * The writer bias is necessary because we take the exclusive lock in
 * `gc_event ()` during STW. If the writer bias was not there, and a
 * program had a large number of threads, STW-induced pauses could be
 * significantly longer than they have to be. Also, we emit periodic
 * sync points from the helper thread, which requires taking the
 * exclusive lock, and we need those to arrive with a reasonably
 * consistent frequency so that readers don't have to queue up too many
 * events between sync points.
 *
 * The lock does not support recursion.
 */
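
/*
 * Illustrative lock states: 0x00000000 means unlocked; 0x00000003 means
 * three readers hold the lock; 0x00050000 means the thread with small ID 5
 * holds the exclusive lock. Readers increment the low 16 bits; a writer
 * CASes the whole word from 0 to its small ID shifted left by 16.
 */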
static void
buffer_lock (void)
{
	/*
	 * If the thread holding the exclusive lock tries to modify the
	 * reader count, just make it a no-op. This way, we also avoid
	 * invoking the GC safe point macros below, which could break if
	 * done from a thread that is currently the initiator of STW.
	 *
	 * In other words, we rely on the fact that the GC thread takes
	 * the exclusive lock in the gc_event () callback when the world
	 * is about to stop.
	 */
	if (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) != get_thread ()->small_id << 16) {
		MONO_ENTER_GC_SAFE;

		gint32 old, new_;

		do {
		restart:
			// Hold off if a thread wants to take the exclusive lock.
			while (mono_atomic_load_i32 (&log_profiler.buffer_lock_exclusive_intent))
				mono_thread_info_yield ();

			old = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);

			// Is a thread holding the exclusive lock?
			if (old >> 16) {
				mono_thread_info_yield ();
				goto restart;
			}

			new_ = old + 1;
		} while (mono_atomic_cas_i32 (&log_profiler.buffer_lock_state, new_, old) != old);

		MONO_EXIT_GC_SAFE;
	}

	mono_memory_barrier ();
}

static void
buffer_unlock (void)
{
	mono_memory_barrier ();

	gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);

	// See the comment in buffer_lock ().
	if (state == get_thread ()->small_id << 16)
		return;

	g_assert (state && "Why are we decrementing a zero reader count?");
	g_assert (!(state >> 16) && "Why is the exclusive lock held?");

	mono_atomic_dec_i32 (&log_profiler.buffer_lock_state);
}

static void
buffer_lock_excl (void)
{
	gint32 new_ = get_thread ()->small_id << 16;

	g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) != new_ && "Why are we taking the exclusive lock twice?");

	mono_atomic_inc_i32 (&log_profiler.buffer_lock_exclusive_intent);

	MONO_ENTER_GC_SAFE;

	while (mono_atomic_cas_i32 (&log_profiler.buffer_lock_state, new_, 0))
		mono_thread_info_yield ();

	MONO_EXIT_GC_SAFE;

	mono_memory_barrier ();
}

static void
buffer_unlock_excl (void)
{
	mono_memory_barrier ();

	gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);
	gint32 excl = state >> 16;

	g_assert (excl && "Why is the exclusive lock not held?");
	g_assert (excl == get_thread ()->small_id && "Why does another thread hold the exclusive lock?");
	g_assert (!(state & 0xFFFF) && "Why are there readers when the exclusive lock is held?");

	mono_atomic_store_i32 (&log_profiler.buffer_lock_state, 0);
	mono_atomic_dec_i32 (&log_profiler.buffer_lock_exclusive_intent);
}
static void
encode_uleb128 (uint64_t value, uint8_t *buf, uint8_t **endbuf)
{
	uint8_t *p = buf;

	do {
		uint8_t b = value & 0x7f;
		value >>= 7;

		if (value != 0) /* more bytes to come */
			b |= 0x80;

		*p ++ = b;
	} while (value);

	*endbuf = p;
}
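
/*
 * Example: encode_uleb128 (300, ...) emits 0xAC 0x02. 300 is 0b100101100;
 * the low seven bits (0x2C) go out first with the continuation bit (0x80)
 * set, followed by the remaining bits (0x02).
 */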
static void
encode_sleb128 (intptr_t value, uint8_t *buf, uint8_t **endbuf)
{
	int more = 1;
	int negative = (value < 0);
	unsigned int size = sizeof (intptr_t) * 8;
	uint8_t byte;
	uint8_t *p = buf;

	while (more) {
		byte = value & 0x7f;
		value >>= 7;

		/* the following is unnecessary if the
		 * implementation of >>= uses an arithmetic rather
		 * than logical shift for a signed left operand
		 */
		if (negative)
			/* sign extend */
			value |= - ((intptr_t) 1 << (size - 7));

		/* sign bit of byte is second high order bit (0x40) */
		if ((value == 0 && !(byte & 0x40)) ||
		    (value == -1 && (byte & 0x40)))
			more = 0;
		else
			byte |= 0x80;

		*p ++= byte;
	}

	*endbuf = p;
}
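
/*
 * Example: encode_sleb128 (-2, ...) emits the single byte 0x7E. After one
 * iteration, value is -1 and bit 0x40 of the byte is set, so the sign is
 * unambiguous and no continuation byte is needed.
 */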
static void
emit_byte (LogBuffer *logbuffer, int value)
{
	logbuffer->cursor [0] = value;
	logbuffer->cursor++;

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_value (LogBuffer *logbuffer, int value)
{
	encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_time (LogBuffer *logbuffer, uint64_t value)
{
	uint64_t tdiff = value - logbuffer->last_time;
	encode_uleb128 (tdiff, logbuffer->cursor, &logbuffer->cursor);
	logbuffer->last_time = value;

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
{
	emit_byte (logbuffer, event);
	emit_time (logbuffer, time);
}

static void
emit_event (LogBuffer *logbuffer, int event)
{
	emit_event_time (logbuffer, event, current_time ());
}

static void
emit_svalue (LogBuffer *logbuffer, int64_t value)
{
	encode_sleb128 (value, logbuffer->cursor, &logbuffer->cursor);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_uvalue (LogBuffer *logbuffer, uint64_t value)
{
	encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}
static void
emit_ptr (LogBuffer *logbuffer, const void *ptr)
{
	if (!logbuffer->has_ptr_base) {
		logbuffer->ptr_base = (uintptr_t) ptr;
		logbuffer->has_ptr_base = TRUE;
	}

	emit_svalue (logbuffer, (intptr_t) ptr - logbuffer->ptr_base);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}
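
/*
 * Example: if the first pointer emitted into a buffer is 0x7f0000001000
 * (an illustrative address), it becomes ptr_base and is written as the
 * delta 0; a later pointer 0x7f0000001040 is written as the delta 0x40.
 * The reader adds the buffer's ptr_base back when decoding.
 */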
static void
emit_method_inner (LogBuffer *logbuffer, void *method)
{
	if (!logbuffer->method_base) {
		logbuffer->method_base = (intptr_t) method;
		logbuffer->last_method = (intptr_t) method;
	}

	encode_sleb128 ((intptr_t) ((char *) method - (char *) logbuffer->last_method), logbuffer->cursor, &logbuffer->cursor);
	logbuffer->last_method = (intptr_t) method;

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}
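
/*
 * Note that, unlike emit_ptr (), methods are delta-encoded against the
 * previously emitted method (last_method) rather than a fixed base, which
 * keeps the deltas small when consecutive events reference methods that
 * are close together in memory.
 */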
typedef struct {
	MonoMethod *method;
	MonoJitInfo *ji;
	uint64_t time;
} MethodInfo;

// The reader lock must be held.
static void
register_method_local (MonoMethod *method, MonoJitInfo *ji)
{
	MonoProfilerThread *thread = get_thread ();

	if (!mono_conc_hashtable_lookup (log_profiler.method_table, method)) {
		MethodInfo *info = (MethodInfo *) g_malloc (sizeof (MethodInfo));

		info->method = method;
		info->ji = ji;
		info->time = current_time ();

		GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());

		g_ptr_array_add (arr, info);
	}
}

static void
emit_method (LogBuffer *logbuffer, MonoMethod *method)
{
	register_method_local (method, NULL);
	emit_method_inner (logbuffer, method);
}

static void
emit_obj (LogBuffer *logbuffer, void *ptr)
{
	if (!logbuffer->obj_base)
		logbuffer->obj_base = (uintptr_t) ptr >> 3;

	emit_svalue (logbuffer, ((uintptr_t) ptr >> 3) - logbuffer->obj_base);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

static void
emit_string (LogBuffer *logbuffer, const char *str, size_t size)
{
	size_t i = 0;

	if (str) {
		for (; i < size; i++) {
			if (str [i] == '\0')
				break;

			emit_byte (logbuffer, str [i]);
		}
	}

	emit_byte (logbuffer, '\0');
}

static void
emit_double (LogBuffer *logbuffer, double value)
{
	int i;
	unsigned char buffer [8];
	memcpy (buffer, &value, 8);
#if G_BYTE_ORDER == G_BIG_ENDIAN
	for (i = 7; i >= 0; i--)
#else
	for (i = 0; i < 8; i++)
#endif
		emit_byte (logbuffer, buffer [i]);
}
static char*
write_int16 (char *buf, int32_t value)
{
	int i;
	for (i = 0; i < 2; ++i) {
		buf [i] = value;
		value >>= 8;
	}
	return buf + 2;
}

static char*
write_int32 (char *buf, int32_t value)
{
	int i;
	for (i = 0; i < 4; ++i) {
		buf [i] = value;
		value >>= 8;
	}
	return buf + 4;
}

static char*
write_int64 (char *buf, int64_t value)
{
	int i;
	for (i = 0; i < 8; ++i) {
		buf [i] = value;
		value >>= 8;
	}
	return buf + 8;
}

static char *
write_header_string (char *p, const char *str)
{
	size_t len = strlen (str) + 1;

	p = write_int32 (p, len);
	strcpy (p, str);

	return p + len;
}
static void
dump_header (void)
{
	const char *args = log_profiler.args;
	const char *arch = mono_config_get_cpu ();
	const char *os = mono_config_get_os ();

	char *hbuf = g_malloc (
		sizeof (gint32) /* header id */ +
		sizeof (gint8) /* major version */ +
		sizeof (gint8) /* minor version */ +
		sizeof (gint8) /* data version */ +
		sizeof (gint8) /* word size */ +
		sizeof (gint64) /* startup time */ +
		sizeof (gint32) /* timer overhead */ +
		sizeof (gint32) /* flags */ +
		sizeof (gint32) /* process id */ +
		sizeof (gint16) /* command port */ +
		sizeof (gint32) + strlen (args) + 1 /* arguments */ +
		sizeof (gint32) + strlen (arch) + 1 /* architecture */ +
		sizeof (gint32) + strlen (os) + 1 /* operating system */
	);
	char *p = hbuf;

	p = write_int32 (p, LOG_HEADER_ID);
	*p++ = LOG_VERSION_MAJOR;
	*p++ = LOG_VERSION_MINOR;
	*p++ = LOG_DATA_VERSION;
	*p++ = sizeof (void *);
	p = write_int64 (p, ((uint64_t) time (NULL)) * 1000);
	p = write_int32 (p, log_profiler.timer_overhead);
	p = write_int32 (p, 0); /* flags */
	p = write_int32 (p, process_id ());
	p = write_int16 (p, log_profiler.command_port);
	p = write_header_string (p, args);
	p = write_header_string (p, arch);
	p = write_header_string (p, os);

#if defined (HAVE_SYS_ZLIB)
	if (log_profiler.gzfile) {
		gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
	} else
#endif
	{
		fwrite (hbuf, p - hbuf, 1, log_profiler.file);
		fflush (log_profiler.file);
	}

	g_free (hbuf);
}
/*
 * Must be called with the reader lock held if thread is the current thread, or
 * the exclusive lock if thread is a different thread. However, if thread is
 * the current thread, and init_thread () was called with add_to_lls = FALSE,
 * then no locking is necessary.
 */
static void
send_buffer (MonoProfilerThread *thread)
{
	WriterQueueEntry *entry = mono_lock_free_alloc (&log_profiler.writer_entry_allocator);
	entry->methods = thread->methods;
	entry->buffer = thread->buffer;

	mono_lock_free_queue_node_init (&entry->node, FALSE);

	mono_lock_free_queue_enqueue (&log_profiler.writer_queue, &entry->node);
	mono_os_sem_post (&log_profiler.writer_queue_sem);
}

static void
free_thread (gpointer p)
{
	MonoProfilerThread *thread = p;

	if (!thread->ended) {
		/*
		 * The thread is being cleaned up by the main thread during
		 * shutdown. This typically happens for internal runtime
		 * threads. We need to synthesize a thread end event.
		 */

		mono_atomic_inc_i32 (&thread_ends_ctr);

		LogBuffer *buf = ensure_logbuf_unsafe (thread,
			EVENT_SIZE /* event */ +
			BYTE_SIZE /* type */ +
			LEB128_SIZE /* tid */
		);

		emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
		emit_byte (buf, TYPE_THREAD);
		emit_ptr (buf, (void *) thread->node.key);

		send_buffer (thread);
	}

	g_free (thread);
}

static void
remove_thread (MonoProfilerThread *thread)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();

	if (mono_lls_remove (&log_profiler.profiler_thread_list, hp, &thread->node))
		mono_thread_hazardous_try_free (thread, free_thread);

	clear_hazard_pointers (hp);
}

static void
dump_buffer (LogBuffer *buf)
{
	char hbuf [128];
	char *p = hbuf;

	if (buf->next)
		dump_buffer (buf->next);

	if (buf->cursor - buf->buf) {
		p = write_int32 (p, BUF_ID);
		p = write_int32 (p, buf->cursor - buf->buf);
		p = write_int64 (p, buf->time_base);
		p = write_int64 (p, buf->ptr_base);
		p = write_int64 (p, buf->obj_base);
		p = write_int64 (p, buf->thread_id);
		p = write_int64 (p, buf->method_base);

#if defined (HAVE_SYS_ZLIB)
		if (log_profiler.gzfile) {
			gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
			gzwrite (log_profiler.gzfile, buf->buf, buf->cursor - buf->buf);
		} else
#endif
		{
			fwrite (hbuf, p - hbuf, 1, log_profiler.file);
			fwrite (buf->buf, buf->cursor - buf->buf, 1, log_profiler.file);
			fflush (log_profiler.file);
		}
	}

	free_buffer (buf, buf->size);
}

static void
dump_buffer_threadless (LogBuffer *buf)
{
	for (LogBuffer *iter = buf; iter; iter = iter->next)
		iter->thread_id = 0;

	dump_buffer (buf);
}

// Only valid if init_thread () was called with add_to_lls = FALSE.
static void
send_log_unsafe (gboolean if_needed)
{
	MonoProfilerThread *thread = get_thread ();

	if (!if_needed || (if_needed && thread->buffer->next)) {
		if (!thread->attached)
			for (LogBuffer *iter = thread->buffer; iter; iter = iter->next)
				iter->thread_id = 0;

		send_buffer (thread);
		init_buffer_state (thread);
	}
}
// Assumes that the exclusive lock is held.
static void
sync_point_flush (void)
{
	g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) == get_thread ()->small_id << 16 && "Why don't we hold the exclusive lock?");

	MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
		g_assert (thread->attached && "Why is a thread in the LLS not attached?");

		send_buffer (thread);
		init_buffer_state (thread);
	} MONO_LLS_FOREACH_SAFE_END
}

// Assumes that the exclusive lock is held.
static void
sync_point_mark (MonoProfilerSyncPointType type)
{
	g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) == get_thread ()->small_id << 16 && "Why don't we hold the exclusive lock?");

	ENTER_LOG (&sync_points_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */
	);

	emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
	emit_byte (logbuffer, type);

	EXIT_LOG_EXPLICIT (NO_SEND);

	send_log_unsafe (FALSE);
}

// Assumes that the exclusive lock is held.
static void
sync_point (MonoProfilerSyncPointType type)
{
	sync_point_flush ();
	sync_point_mark (type);
}
static int
gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
{
	/* account for object alignment in the heap */
	size += 7;
	size &= ~7;

	ENTER_LOG (&heap_objects_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* obj */ +
		LEB128_SIZE /* vtable */ +
		LEB128_SIZE /* size */ +
		LEB128_SIZE /* num */ +
		num * (
			LEB128_SIZE /* offset */ +
			LEB128_SIZE /* ref */
		)
	);

	emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
	emit_obj (logbuffer, obj);
	emit_ptr (logbuffer, mono_object_get_vtable (obj));
	emit_value (logbuffer, size);
	emit_value (logbuffer, num);

	uintptr_t last_offset = 0;

	for (int i = 0; i < num; ++i) {
		emit_value (logbuffer, offsets [i] - last_offset);
		last_offset = offsets [i];
		emit_obj (logbuffer, refs [i]);
	}

	EXIT_LOG;

	return 0;
}

static void
gc_roots (MonoProfiler *prof, uint64_t num, const mono_byte *const *addresses, MonoObject *const *objects)
{
	ENTER_LOG (&heap_roots_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* num */ +
		num * (
			LEB128_SIZE /* address */ +
			LEB128_SIZE /* object */
		)
	);

	emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
	emit_value (logbuffer, num);

	for (int i = 0; i < num; ++i) {
		emit_ptr (logbuffer, addresses [i]);
		emit_obj (logbuffer, objects [i]);
	}

	EXIT_LOG;
}

static void
gc_root_register (MonoProfiler *prof, const mono_byte *start, size_t size, MonoGCRootSource source, const void *key, const char *name)
{
	// We don't write raw domain/context pointers in metadata events.
	switch (source) {
	case MONO_ROOT_SOURCE_DOMAIN:
		if (key)
			key = (void *)(uintptr_t) mono_domain_get_id ((MonoDomain *) key);
		break;
	case MONO_ROOT_SOURCE_CONTEXT_STATIC:
		key = (void *)(uintptr_t) mono_context_get_id ((MonoAppContext *) key);
		break;
	default:
		break;
	}

	int name_len = name ? strlen (name) + 1 : 0;

	ENTER_LOG (&heap_roots_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* start */ +
		LEB128_SIZE /* size */ +
		BYTE_SIZE /* source */ +
		LEB128_SIZE /* key */ +
		name_len /* name */
	);

	emit_event (logbuffer, TYPE_HEAP_ROOT_REGISTER | TYPE_HEAP);
	emit_ptr (logbuffer, start);
	emit_uvalue (logbuffer, size);
	emit_byte (logbuffer, source);
	emit_ptr (logbuffer, key);
	emit_string (logbuffer, name, name_len);

	EXIT_LOG;
}

static void
gc_root_deregister (MonoProfiler *prof, const mono_byte *start)
{
	ENTER_LOG (&heap_roots_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* start */
	);

	emit_event (logbuffer, TYPE_HEAP_ROOT_UNREGISTER | TYPE_HEAP);
	emit_ptr (logbuffer, start);

	EXIT_LOG;
}

static void
trigger_heapshot (void)
{
	// Rely on the finalization callback triggering a GC.
	mono_atomic_store_i32 (&log_profiler.heapshot_requested, 1);
	mono_gc_finalize_notify ();
}

static void
process_heapshot (void)
{
	if (mono_atomic_load_i32 (&log_profiler.heapshot_requested))
		mono_gc_collect (mono_gc_max_generation ());
}
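
/*
 * The full on-demand flow: trigger_heapshot () sets heapshot_requested and
 * calls mono_gc_finalize_notify (); the finalizer thread then runs
 * finalize_end (), which calls process_heapshot () to force a major
 * collection; the heap walk itself happens in gc_event () during that
 * collection.
 */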
#define ALL_GC_EVENTS_MASK (PROFLOG_GC_EVENTS | PROFLOG_GC_MOVE_EVENTS | PROFLOG_GC_ROOT_EVENTS)

static void
gc_event (MonoProfiler *profiler, MonoProfilerGCEvent ev, uint32_t generation, gboolean is_serial)
{
	gboolean is_major = generation == mono_gc_max_generation ();

	if (ENABLED (PROFLOG_GC_EVENTS)) {
		ENTER_LOG (&gc_events_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			BYTE_SIZE /* gc event */ +
			BYTE_SIZE /* generation */
		);

		emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
		emit_byte (logbuffer, ev);
		emit_byte (logbuffer, generation);

		EXIT_LOG;
	}

	switch (ev) {
	case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
		switch (log_config.hs_mode) {
		case MONO_PROFILER_HEAPSHOT_NONE:
			log_profiler.do_heap_walk = FALSE;
			break;
		case MONO_PROFILER_HEAPSHOT_MAJOR:
			log_profiler.do_heap_walk = is_major;
			break;
		case MONO_PROFILER_HEAPSHOT_ON_DEMAND:
			// Handled below by the heapshot_requested check.
			break;
		case MONO_PROFILER_HEAPSHOT_X_GC:
			log_profiler.do_heap_walk = !(log_profiler.gc_count % log_config.hs_freq_gc);
			break;
		case MONO_PROFILER_HEAPSHOT_X_MS:
			log_profiler.do_heap_walk = (current_time () - log_profiler.last_hs_time) / 1000 / 1000 >= log_config.hs_freq_ms;
			break;
		default:
			g_assert_not_reached ();
		}

		/*
		 * heapshot_requested is set either because a heapshot was triggered
		 * manually (through the API or command server) or because we're doing
		 * a shutdown heapshot. Either way, a manually triggered heapshot
		 * overrides any decision we made in the switch above.
		 */
		if (is_major && is_serial && mono_atomic_load_i32 (&log_profiler.heapshot_requested)) {
			log_profiler.do_heap_walk = TRUE;
		} else if (log_profiler.do_heap_walk && (!is_major || !is_serial)) {
			/* Do a heap walk later, when we get invoked from the finalizer in serial mode */
			trigger_heapshot ();
			log_profiler.do_heap_walk = FALSE;
		}

		if (ENABLED (PROFLOG_GC_ROOT_EVENTS) &&
		    (log_config.always_do_root_report || log_profiler.do_heap_walk))
			mono_profiler_set_gc_roots_callback (log_profiler.handle, gc_roots);

		/*
		 * Ensure that no thread can be in the middle of writing to
		 * a buffer when the world stops...
		 */
		buffer_lock_excl ();

		break;
	case MONO_GC_EVENT_POST_STOP_WORLD:
		/*
		 * ... So that we now have a consistent view of all buffers.
		 * This allows us to flush them. We need to do this because
		 * they may contain object allocation events that need to be
		 * committed to the log file before any object move events
		 * that will be produced during this GC.
		 */
		if (ENABLED (ALL_GC_EVENTS_MASK))
			sync_point (SYNC_POINT_WORLD_STOP);

		// Surround heapshots with HEAP_START/HEAP_END events.
		if (log_profiler.do_heap_walk) {
			ENTER_LOG (&heap_starts_ctr, logbuffer,
				EVENT_SIZE /* event */
			);

			emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);

			EXIT_LOG;
		}

		break;
	case MONO_GC_EVENT_START:
		if (is_major)
			log_profiler.gc_count++;

		break;
	case MONO_GC_EVENT_PRE_START_WORLD:
		mono_profiler_set_gc_roots_callback (log_profiler.handle, NULL);

		if (log_profiler.do_heap_walk) {
			g_assert (is_major && is_serial);
			mono_gc_walk_heap (0, gc_reference, NULL);

			ENTER_LOG (&heap_ends_ctr, logbuffer,
				EVENT_SIZE /* event */
			);

			emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);

			EXIT_LOG;

			log_profiler.do_heap_walk = FALSE;
			log_profiler.last_hs_time = current_time ();

			mono_atomic_store_i32 (&log_profiler.heapshot_requested, 0);
		}

		/*
		 * Similarly, we must now make sure that any object moves
		 * written to the GC thread's buffer are flushed. Otherwise,
		 * object allocation events for certain addresses could come
		 * after the move events that made those addresses available.
		 */
		if (ENABLED (ALL_GC_EVENTS_MASK))
			sync_point_mark (SYNC_POINT_WORLD_START);

		break;
	case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
		/*
		 * Finally, it is safe to allow other threads to write to
		 * their buffers again.
		 */
		buffer_unlock_excl ();

		break;
	default:
		break;
	}
}
static void
gc_resize (MonoProfiler *profiler, uintptr_t new_size)
{
	ENTER_LOG (&gc_resizes_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* new size */
	);

	emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
	emit_uvalue (logbuffer, new_size);

	EXIT_LOG;
}

#define MAX_FRAMES 32

typedef struct {
	int count;
	MonoMethod* methods [MAX_FRAMES];
	int32_t il_offsets [MAX_FRAMES];
	int32_t native_offsets [MAX_FRAMES];
} FrameData;

static mono_bool
walk_stack (MonoMethod *method, int32_t native_offset, int32_t il_offset, mono_bool managed, void* data)
{
	FrameData *frame = (FrameData *)data;
	if (method && frame->count < log_config.num_frames) {
		frame->il_offsets [frame->count] = il_offset;
		frame->native_offsets [frame->count] = native_offset;
		frame->methods [frame->count++] = method;
	}
	return frame->count == log_config.num_frames;
}

/*
 * a note about stack walks: they can cause more profiler events to fire,
 * so we need to make sure they don't happen after we started emitting an
 * event, hence the collect_bt/emit_bt split.
 */
static void
collect_bt (FrameData *data)
{
	data->count = 0;
	mono_stack_walk_no_il (walk_stack, data);
}

static void
emit_bt (LogBuffer *logbuffer, FrameData *data)
{
	emit_value (logbuffer, data->count);

	while (data->count)
		emit_method (logbuffer, data->methods [--data->count]);
}
static void
gc_alloc (MonoProfiler *prof, MonoObject *obj)
{
	int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_ALLOC_BT : 0;
	FrameData data;
	uintptr_t len = mono_object_get_size (obj);
	/* account for object alignment in the heap */
	len += 7;
	len &= ~7;

	if (do_bt)
		collect_bt (&data);

	ENTER_LOG (&gc_allocs_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* vtable */ +
		LEB128_SIZE /* obj */ +
		LEB128_SIZE /* size */ +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	emit_event (logbuffer, do_bt | TYPE_ALLOC);
	emit_ptr (logbuffer, mono_object_get_vtable (obj));
	emit_obj (logbuffer, obj);
	emit_value (logbuffer, len);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}

static void
gc_moves (MonoProfiler *prof, MonoObject *const *objects, uint64_t num)
{
	ENTER_LOG (&gc_moves_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* num */ +
		num * (
			LEB128_SIZE /* object */
		)
	);

	emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
	emit_value (logbuffer, num);

	for (int i = 0; i < num; ++i)
		emit_obj (logbuffer, objects [i]);

	EXIT_LOG;
}

static void
gc_handle (MonoProfiler *prof, int op, MonoGCHandleType type, uint32_t handle, MonoObject *obj)
{
	int do_bt = !log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames;
	FrameData data;

	if (do_bt)
		collect_bt (&data);

	gint32 *ctr = op == MONO_PROFILER_GC_HANDLE_CREATED ? &gc_handle_creations_ctr : &gc_handle_deletions_ctr;

	ENTER_LOG (ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* type */ +
		LEB128_SIZE /* handle */ +
		(op == MONO_PROFILER_GC_HANDLE_CREATED ? (
			LEB128_SIZE /* obj */
		) : 0) +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	if (op == MONO_PROFILER_GC_HANDLE_CREATED)
		emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
	else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
		emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
	else
		g_assert_not_reached ();

	emit_value (logbuffer, type);
	emit_value (logbuffer, handle);

	if (op == MONO_PROFILER_GC_HANDLE_CREATED)
		emit_obj (logbuffer, obj);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}

static void
gc_handle_created (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type, MonoObject *obj)
{
	gc_handle (prof, MONO_PROFILER_GC_HANDLE_CREATED, type, handle, obj);
}

static void
gc_handle_deleted (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type)
{
	gc_handle (prof, MONO_PROFILER_GC_HANDLE_DESTROYED, type, handle, NULL);
}

static void
finalize_begin (MonoProfiler *prof)
{
	ENTER_LOG (&finalize_begins_ctr, buf,
		EVENT_SIZE /* event */
	);

	emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);

	EXIT_LOG;
}

static void
finalize_end (MonoProfiler *prof)
{
	process_heapshot ();

	if (ENABLED (PROFLOG_GC_FINALIZATION_EVENTS)) {
		ENTER_LOG (&finalize_ends_ctr, buf,
			EVENT_SIZE /* event */
		);

		emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);

		EXIT_LOG;
	}
}

static void
finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
{
	ENTER_LOG (&finalize_object_begins_ctr, buf,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* obj */
	);

	emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
	emit_obj (buf, obj);

	EXIT_LOG;
}

static void
finalize_object_end (MonoProfiler *prof, MonoObject *obj)
{
	ENTER_LOG (&finalize_object_ends_ctr, buf,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* obj */
	);

	emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
	emit_obj (buf, obj);

	EXIT_LOG;
}
static char*
push_nesting (char *p, MonoClass *klass)
{
	MonoClass *nesting;
	const char *name;
	const char *nspace;
	nesting = mono_class_get_nesting_type (klass);
	if (nesting) {
		p = push_nesting (p, nesting);
		*p++ = '/';
		*p = 0;
	}
	name = mono_class_get_name (klass);
	nspace = mono_class_get_namespace (klass);
	if (*nspace) {
		strcpy (p, nspace);
		p += strlen (nspace);
		*p++ = '.';
		*p = 0;
	}
	strcpy (p, name);
	p += strlen (name);
	return p;
}

static char*
type_name (MonoClass *klass)
{
	char buf [1024];
	char *p;
	push_nesting (buf, klass);
	p = (char *) g_malloc (strlen (buf) + 1);
	strcpy (p, buf);
	return p;
}

static void
image_loaded (MonoProfiler *prof, MonoImage *image)
{
	const char *name = mono_image_get_filename (image);
	int nlen = strlen (name) + 1;

	ENTER_LOG (&image_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_IMAGE);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;
}

static void
image_unloaded (MonoProfiler *prof, MonoImage *image)
{
	const char *name = mono_image_get_filename (image);
	int nlen = strlen (name) + 1;

	ENTER_LOG (&image_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_IMAGE);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;
}

static void
assembly_loaded (MonoProfiler *prof, MonoAssembly *assembly)
{
	char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
	int nlen = strlen (name) + 1;
	MonoImage *image = mono_assembly_get_image (assembly);

	ENTER_LOG (&assembly_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* assembly */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_ASSEMBLY);
	emit_ptr (logbuffer, assembly);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;

	mono_free (name);
}

static void
assembly_unloaded (MonoProfiler *prof, MonoAssembly *assembly)
{
	char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
	int nlen = strlen (name) + 1;
	MonoImage *image = mono_assembly_get_image (assembly);

	ENTER_LOG (&assembly_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* assembly */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_ASSEMBLY);
	emit_ptr (logbuffer, assembly);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;

	mono_free (name);
}

static void
class_loaded (MonoProfiler *prof, MonoClass *klass)
{
	char *name;

	if (mono_atomic_load_i32 (&log_profiler.runtime_inited))
		name = mono_type_get_name (mono_class_get_type (klass));
	else
		name = type_name (klass);

	int nlen = strlen (name) + 1;
	MonoImage *image = mono_class_get_image (klass);

	ENTER_LOG (&class_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* klass */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_CLASS);
	emit_ptr (logbuffer, klass);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;

	if (mono_atomic_load_i32 (&log_profiler.runtime_inited))
		mono_free (name);
	else
		g_free (name);
}

static void
vtable_loaded (MonoProfiler *prof, MonoVTable *vtable)
{
	MonoClass *klass = mono_vtable_class (vtable);
	MonoDomain *domain = mono_vtable_domain (vtable);
	uint32_t domain_id = domain ? mono_domain_get_id (domain) : 0;

	ENTER_LOG (&vtable_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* vtable */ +
		LEB128_SIZE /* domain id */ +
		LEB128_SIZE /* klass */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_VTABLE);
	emit_ptr (logbuffer, vtable);
	emit_ptr (logbuffer, (void *)(uintptr_t) domain_id);
	emit_ptr (logbuffer, klass);

	EXIT_LOG;
}
static void
method_enter (MonoProfiler *prof, MonoMethod *method, MonoProfilerCallContext *ctx)
{
	if (get_thread ()->call_depth++ <= log_config.max_call_depth) {
		ENTER_LOG (&method_entries_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method */
		);

		emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
		emit_method (logbuffer, method);

		EXIT_LOG;
	}
}

static void
method_leave (MonoProfiler *prof, MonoMethod *method, MonoProfilerCallContext *ctx)
{
	if (--get_thread ()->call_depth <= log_config.max_call_depth) {
		ENTER_LOG (&method_exits_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method */
		);

		emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
		emit_method (logbuffer, method);

		EXIT_LOG;
	}
}

static void
tail_call (MonoProfiler *prof, MonoMethod *method, MonoMethod *target)
{
	method_leave (prof, method, NULL);
}

static void
method_exc_leave (MonoProfiler *prof, MonoMethod *method, MonoObject *exc)
{
	if (--get_thread ()->call_depth <= log_config.max_call_depth) {
		ENTER_LOG (&method_exception_exits_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method */
		);

		emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
		emit_method (logbuffer, method);

		EXIT_LOG;
	}
}

static MonoProfilerCallInstrumentationFlags
method_filter (MonoProfiler *prof, MonoMethod *method)
{
	if (log_config.callspec.len > 0 &&
	    !mono_callspec_eval (method, &log_config.callspec))
		return MONO_PROFILER_CALL_INSTRUMENTATION_NONE;

	return MONO_PROFILER_CALL_INSTRUMENTATION_ENTER |
	       MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE |
	       MONO_PROFILER_CALL_INSTRUMENTATION_TAIL_CALL |
	       MONO_PROFILER_CALL_INSTRUMENTATION_EXCEPTION_LEAVE;
}

static void
method_jitted (MonoProfiler *prof, MonoMethod *method, MonoJitInfo *ji)
{
	buffer_lock ();

	register_method_local (method, ji);

	buffer_unlock ();
}

static void
code_buffer_new (MonoProfiler *prof, const mono_byte *buffer, uint64_t size, MonoProfilerCodeBufferType type, const void *data)
{
	const char *name;
	int nlen;

	if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
		name = (const char *) data;
		nlen = strlen (name) + 1;
	} else {
		name = NULL;
		nlen = 0;
	}

	ENTER_LOG (&code_buffers_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* buffer */ +
		LEB128_SIZE /* size */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
	emit_byte (logbuffer, type);
	emit_ptr (logbuffer, buffer);
	emit_value (logbuffer, size);

	if (name) {
		memcpy (logbuffer->cursor, name, nlen);
		logbuffer->cursor += nlen;
	}

	EXIT_LOG;
}

static void
throw_exc (MonoProfiler *prof, MonoObject *object)
{
	int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_THROW_BT : 0;
	FrameData data;

	if (do_bt)
		collect_bt (&data);

	ENTER_LOG (&exception_throws_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* object */ +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
	emit_obj (logbuffer, object);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}

static void
clause_exc (MonoProfiler *prof, MonoMethod *method, uint32_t clause_num, MonoExceptionEnum clause_type, MonoObject *exc)
{
	ENTER_LOG (&exception_clauses_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* clause type */ +
		LEB128_SIZE /* clause num */ +
		LEB128_SIZE /* method */ +
		LEB128_SIZE /* exc */
	);

	emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
	emit_byte (logbuffer, clause_type);
	emit_value (logbuffer, clause_num);
	emit_method (logbuffer, method);
	emit_obj (logbuffer, exc);

	EXIT_LOG;
}

static void
monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
{
	int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_MONITOR_BT : 0;
	FrameData data;

	if (do_bt)
		collect_bt (&data);

	ENTER_LOG (&monitor_events_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* ev */ +
		LEB128_SIZE /* object */ +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	emit_event (logbuffer, do_bt | TYPE_MONITOR);
	emit_byte (logbuffer, ev);
	emit_obj (logbuffer, object);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}

static void
monitor_contention (MonoProfiler *prof, MonoObject *object)
{
	monitor_event (prof, object, MONO_PROFILER_MONITOR_CONTENTION);
}

static void
monitor_acquired (MonoProfiler *prof, MonoObject *object)
{
	monitor_event (prof, object, MONO_PROFILER_MONITOR_DONE);
}

static void
monitor_failed (MonoProfiler *prof, MonoObject *object)
{
	monitor_event (prof, object, MONO_PROFILER_MONITOR_FAIL);
}
static void
thread_start (MonoProfiler *prof, uintptr_t tid)
{
	ENTER_LOG (&thread_starts_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* tid */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_THREAD);
	emit_ptr (logbuffer, (void*) tid);

	EXIT_LOG;
}

static void
thread_end (MonoProfiler *prof, uintptr_t tid)
{
	ENTER_LOG (&thread_ends_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* tid */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_THREAD);
	emit_ptr (logbuffer, (void*) tid);

	EXIT_LOG_EXPLICIT (NO_SEND);

	MonoProfilerThread *thread = get_thread ();

	thread->ended = TRUE;
	remove_thread (thread);

	PROF_TLS_SET (MONO_PROFILER_THREAD_DEAD);
}

static void
thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
{
	int len = strlen (name) + 1;

	ENTER_LOG (&thread_names_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* tid */ +
		len /* name */
	);

	emit_event (logbuffer, TYPE_METADATA);
	emit_byte (logbuffer, TYPE_THREAD);
	emit_ptr (logbuffer, (void*)tid);
	memcpy (logbuffer->cursor, name, len);
	logbuffer->cursor += len;

	EXIT_LOG;
}

static void
domain_loaded (MonoProfiler *prof, MonoDomain *domain)
{
	ENTER_LOG (&domain_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_DOMAIN);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));

	EXIT_LOG;
}

static void
domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
{
	ENTER_LOG (&domain_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_DOMAIN);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));

	EXIT_LOG;
}

static void
domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
{
	int nlen = strlen (name) + 1;

	ENTER_LOG (&domain_names_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* domain id */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_METADATA);
	emit_byte (logbuffer, TYPE_DOMAIN);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;
}

static void
context_loaded (MonoProfiler *prof, MonoAppContext *context)
{
	ENTER_LOG (&context_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* context id */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_CONTEXT);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));

	EXIT_LOG;
}

static void
context_unloaded (MonoProfiler *prof, MonoAppContext *context)
{
	ENTER_LOG (&context_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* context id */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_CONTEXT);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));

	EXIT_LOG;
}
typedef struct {
	MonoMethod *method;
	MonoDomain *domain;
	void *base_address;
	int offset;
} AsyncFrameInfo;

typedef struct {
	MonoLockFreeQueueNode node;

	uint64_t time;
	uintptr_t tid;
	const void *ip;
	int count;
	AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
} SampleHit;

static mono_bool
async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
{
	SampleHit *sample = (SampleHit *) data;

	if (sample->count < log_config.num_frames) {
		int i = sample->count;

		sample->frames [i].method = method;
		sample->frames [i].domain = domain;
		sample->frames [i].base_address = base_address;
		sample->frames [i].offset = offset;

		sample->count++;
	}

	return sample->count == log_config.num_frames;
}
#define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
#define SAMPLE_BLOCK_SIZE (mono_pagesize ())
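
/*
 * Illustrative arithmetic: with log_config.num_frames == 8 (a value chosen
 * only for this example), each sample slot is sizeof (SampleHit) plus
 * 8 - MONO_ZERO_LEN_ARRAY extra AsyncFrameInfo entries, and the lock-free
 * allocator carves such slots out of page-sized blocks.
 */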
static void
enqueue_sample_hit (gpointer p)
{
	SampleHit *sample = p;

	mono_lock_free_queue_node_unpoison (&sample->node);
	mono_lock_free_queue_enqueue (&log_profiler.dumper_queue, &sample->node);
	mono_os_sem_post (&log_profiler.dumper_queue_sem);
}

static void
mono_sample_hit (MonoProfiler *profiler, const mono_byte *ip, const void *context)
{
	/*
	 * Please note: We rely on the runtime loading the profiler with
	 * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
	 * this function (and its siblings) are resolved when the profiler is
	 * loaded. Otherwise, we would potentially invoke the dynamic linker when
	 * invoking runtime functions, which is not async-signal-safe.
	 */

	if (mono_atomic_load_i32 (&log_profiler.in_shutdown))
		return;

	SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);

	if (!sample) {
		/*
		 * If we're out of reusable sample events and we're not allowed to
		 * allocate more, we have no choice but to drop the event.
		 */
		if (mono_atomic_load_i32 (&sample_allocations_ctr) >= log_config.max_allocated_sample_hits)
			return;

		sample = mono_lock_free_alloc (&profiler->sample_allocator);
		mono_lock_free_queue_node_init (&sample->node, TRUE);

		mono_atomic_inc_i32 (&sample_allocations_ctr);
	}

	sample->count = 0;
	mono_stack_walk_async_safe (&async_walk_stack, (void *) context, sample);

	sample->time = current_time ();
	sample->tid = thread_id ();
	sample->ip = ip;

	mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
}
static uintptr_t *code_pages = 0;
static int num_code_pages = 0;
static int size_code_pages = 0;
#define CPAGE_SHIFT (9)
#define CPAGE_SIZE (1 << CPAGE_SHIFT)
#define CPAGE_MASK (~(CPAGE_SIZE - 1))
#define CPAGE_ADDR(p) ((p) & CPAGE_MASK)
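
/*
 * Example: with CPAGE_SHIFT == 9, code pages are 512 bytes, so an
 * instruction pointer of 0x12345 belongs to page 0x12345 & CPAGE_MASK ==
 * 0x12200.
 */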
static int
add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
{
	uintptr_t i;
	uintptr_t start_pos;
	start_pos = (page >> CPAGE_SHIFT) % hsize;
	i = start_pos;
	do {
		if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
			return 0;
		} else if (!hash [i]) {
			hash [i] = page;
			return 1;
		}
		/* wrap around */
		if (++i == hsize)
			i = 0;
	} while (i != start_pos);
	g_assert_not_reached ();
	return 0;
}

static void
add_code_pointer (uintptr_t ip)
{
	uintptr_t i;
	if (num_code_pages * 2 >= size_code_pages) {
		uintptr_t *n;
		uintptr_t old_size = size_code_pages;
		size_code_pages *= 2;
		if (size_code_pages == 0)
			size_code_pages = 16;
		n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
		for (i = 0; i < old_size; ++i) {
			if (code_pages [i])
				add_code_page (n, size_code_pages, code_pages [i]);
		}
		if (code_pages)
			g_free (code_pages);
		code_pages = n;
	}
	num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
}
2362 /* ELF code crashes on some systems. */
2363 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2366 dump_ubin (const char *filename
, uintptr_t load_addr
, uint64_t offset
, uintptr_t size
)
2368 int len
= strlen (filename
) + 1;
2370 ENTER_LOG (&sample_ubins_ctr
, logbuffer
,
2371 EVENT_SIZE
/* event */ +
2372 LEB128_SIZE
/* load address */ +
2373 LEB128_SIZE
/* offset */ +
2374 LEB128_SIZE
/* size */ +
2378 emit_event (logbuffer
, TYPE_SAMPLE
| TYPE_SAMPLE_UBIN
);
2379 emit_ptr (logbuffer
, load_addr
);
2380 emit_uvalue (logbuffer
, offset
);
2381 emit_uvalue (logbuffer
, size
);
2382 memcpy (logbuffer
->cursor
, filename
, len
);
2383 logbuffer
->cursor
+= len
;
2390 dump_usym (const char *name
, uintptr_t value
, uintptr_t size
)
2392 int len
= strlen (name
) + 1;
2394 ENTER_LOG (&sample_usyms_ctr
, logbuffer
,
2395 EVENT_SIZE
/* event */ +
2396 LEB128_SIZE
/* value */ +
2397 LEB128_SIZE
/* size */ +
2401 emit_event (logbuffer
, TYPE_SAMPLE
| TYPE_SAMPLE_USYM
);
2402 emit_ptr (logbuffer
, (void*)value
);
2403 emit_value (logbuffer
, size
);
2404 memcpy (logbuffer
->cursor
, name
, len
);
2405 logbuffer
->cursor
+= len
;
/* ELF code crashes on some systems. */
//#if defined(ELFMAG0)
#if 0

#if SIZEOF_VOID_P == 4
#define ELF_WSIZE 32
#else
#define ELF_WSIZE 64
#endif

#define ElfW(type)	_ElfW (Elf, ELF_WSIZE, type)
#define _ElfW(e,w,t)	_ElfW_1 (e, w, _##t)
#define _ElfW_1(e,w,t)	e##w##t

static void
dump_elf_symbols (ElfW(Sym) *symbols, int num_symbols, const char *strtab, void *load_addr)
{
	int i;
	for (i = 0; i < num_symbols; ++i) {
		const char* sym;
		sym = strtab + symbols [i].st_name;
		if (!symbols [i].st_name || !symbols [i].st_size || (symbols [i].st_info & 0xf) != STT_FUNC)
			continue;
		dump_usym (sym, (uintptr_t)load_addr + symbols [i].st_value, symbols [i].st_size);
	}
}
static int
read_elf_symbols (const char *filename, void *load_addr)
{
	int fd, i;
	void *data;
	struct stat statb;
	uint64_t file_size;
	ElfW(Ehdr) *header;
	ElfW(Shdr) *sheader;
	ElfW(Shdr) *shstrtabh;
	ElfW(Shdr) *symtabh = NULL;
	ElfW(Shdr) *strtabh = NULL;
	ElfW(Sym) *symbols = NULL;
	const char *strtab;
	int num_symbols;

	fd = open (filename, O_RDONLY);
	if (fd < 0)
		return 0;
	if (fstat (fd, &statb) != 0) {
		close (fd);
		return 0;
	}
	file_size = statb.st_size;
	data = mmap (NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close (fd);
	if (data == MAP_FAILED)
		return 0;
	header = (ElfW(Ehdr) *) data;
	if (header->e_ident [EI_MAG0] != ELFMAG0 ||
			header->e_ident [EI_MAG1] != ELFMAG1 ||
			header->e_ident [EI_MAG2] != ELFMAG2 ||
			header->e_ident [EI_MAG3] != ELFMAG3) {
		munmap (data, file_size);
		return 0;
	}
	sheader = (void*)((char*)data + header->e_shoff);
	shstrtabh = (void*)((char*)sheader + (header->e_shentsize * header->e_shstrndx));
	strtab = (const char*)data + shstrtabh->sh_offset;
	for (i = 0; i < header->e_shnum; ++i) {
		if (sheader->sh_type == SHT_SYMTAB) {
			symtabh = sheader;
			strtabh = (void*)((char*)data + header->e_shoff + sheader->sh_link * header->e_shentsize);
			break;
		}
		sheader = (void*)((char*)sheader + header->e_shentsize);
	}
	if (!symtabh || !strtabh) {
		munmap (data, file_size);
		return 0;
	}
	strtab = (const char*)data + strtabh->sh_offset;
	num_symbols = symtabh->sh_size / symtabh->sh_entsize;
	symbols = (void*)((char*)data + symtabh->sh_offset);
	dump_elf_symbols (symbols, num_symbols, strtab, load_addr);
	munmap (data, file_size);
	return 1;
}
#endif
/* ELF code crashes on some systems. */
//#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
#if 0
static int
elf_dl_callback (struct dl_phdr_info *info, size_t size, void *data)
{
	int i;
	const char *filename;
	BinaryObject *obj;
	char buf [256];
	char *a = (void*)info->dlpi_addr;
	int num_sym;
	ElfW(Dyn) *dyn = NULL;
	ElfW(Sym) *symtab = NULL;
	ElfW(Word) *hash_table = NULL;
	ElfW(Ehdr) *header = NULL;
	const char* strtab = NULL;
	for (obj = log_profiler.binary_objects; obj; obj = obj->next) {
		if (obj->addr == a)
			return 0;
	}
	filename = info->dlpi_name;
	if (!filename)
		return 0;
	if (!info->dlpi_addr && !filename [0]) {
		int l = readlink ("/proc/self/exe", buf, sizeof (buf) - 1);
		if (l > 0) {
			buf [l] = 0;
			filename = buf;
		}
	}
	obj = g_calloc (sizeof (BinaryObject), 1);
	obj->addr = (void*)info->dlpi_addr;
	obj->name = pstrdup (filename);
	obj->next = log_profiler.binary_objects;
	log_profiler.binary_objects = obj;
	for (i = 0; i < info->dlpi_phnum; ++i) {
		if (info->dlpi_phdr[i].p_type == PT_LOAD && !header) {
			header = (ElfW(Ehdr)*)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
			if (header->e_ident [EI_MAG0] != ELFMAG0 ||
					header->e_ident [EI_MAG1] != ELFMAG1 ||
					header->e_ident [EI_MAG2] != ELFMAG2 ||
					header->e_ident [EI_MAG3] != ELFMAG3) {
				header = NULL;
			}
			dump_ubin (filename, info->dlpi_addr + info->dlpi_phdr[i].p_vaddr, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
		} else if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) {
			dyn = (ElfW(Dyn) *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
		}
	}
	if (read_elf_symbols (filename, (void*)info->dlpi_addr))
		return 0;
	if (!info->dlpi_name || !info->dlpi_name[0])
		return 0;
	if (!dyn)
		return 0;
	for (i = 0; dyn [i].d_tag != DT_NULL; ++i) {
		if (dyn [i].d_tag == DT_SYMTAB) {
			symtab = (ElfW(Sym) *)(a + dyn [i].d_un.d_ptr);
		} else if (dyn [i].d_tag == DT_HASH) {
			hash_table = (ElfW(Word) *)(a + dyn [i].d_un.d_ptr);
		} else if (dyn [i].d_tag == DT_STRTAB) {
			strtab = (const char*)(a + dyn [i].d_un.d_ptr);
		}
	}
	if (!hash_table)
		return 0;
	num_sym = hash_table [1];
	dump_elf_symbols (symtab, num_sym, strtab, (void*)info->dlpi_addr);
	return 0;
}
static int
load_binaries (void)
{
	dl_iterate_phdr (elf_dl_callback, NULL);
	return 1;
}
#else
static int
load_binaries (void)
{
	return 0;
}
#endif
static const char*
symbol_for (uintptr_t code)
{
#ifdef HAVE_DLADDR
	void *ip = (void*)code;
	Dl_info di;

	if (dladdr (ip, &di)) {
		if (di.dli_sname)
			return di.dli_sname;
	} else {
	/*	char **names;
		names = backtrace_symbols (&ip, 1);
		if (names) {
			const char* p = names [0];
			g_free (names);
			return p;
		}
	*/
	}
#endif
	return NULL;
}
static void
dump_unmanaged_coderefs (void)
{
	int i;
	const char* last_symbol;
	uintptr_t addr, page_end;

	if (load_binaries ())
		return;
	for (i = 0; i < size_code_pages; ++i) {
		const char* sym;
		if (!code_pages [i] || code_pages [i] & 1)
			continue;
		last_symbol = NULL;
		addr = CPAGE_ADDR (code_pages [i]);
		page_end = addr + CPAGE_SIZE;
		code_pages [i] |= 1;
		/* we dump the symbols for the whole page */
		for (; addr < page_end; addr += 16) {
			sym = symbol_for (addr);
			if (sym && sym == last_symbol)
				continue;
			last_symbol = sym;
			if (!sym)
				continue;
			dump_usym (sym, addr, 0); /* let's not guess the size */
		}
	}
}
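
/*
 * Symbol resolution above is a heuristic: each live code page is probed at a
 * 16-byte stride, and consecutive hits resolving to the same symbol are
 * collapsed via last_symbol so each symbol is emitted once per page. Setting
 * the low bit of code_pages [i] marks the page so it is never dumped again.
 */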
static void
counters_add_agent (MonoCounter *counter)
{
	if (mono_atomic_load_i32 (&log_profiler.in_shutdown))
		return;

	MonoCounterAgent *agent, *item;

	mono_os_mutex_lock (&log_profiler.counters_mutex);

	for (agent = log_profiler.counters; agent; agent = agent->next) {
		if (agent->counter == counter) {
			agent->value_size = 0;
			if (agent->value) {
				g_free (agent->value);
				agent->value = NULL;
			}
			goto done;
		}
	}

	agent = (MonoCounterAgent *) g_malloc (sizeof (MonoCounterAgent));
	agent->counter = counter;
	agent->value = NULL;
	agent->value_size = 0;
	agent->index = log_profiler.counters_index++;
	agent->emitted = FALSE;
	agent->next = NULL;

	if (!log_profiler.counters) {
		log_profiler.counters = agent;
	} else {
		item = log_profiler.counters;
		while (item->next)
			item = item->next;
		item->next = agent;
	}

done:
	mono_os_mutex_unlock (&log_profiler.counters_mutex);
}
static gboolean
counters_init_foreach_callback (MonoCounter *counter, gpointer data)
{
	counters_add_agent (counter);
	return TRUE;
}

static void
counters_init (void)
{
	mono_os_mutex_init (&log_profiler.counters_mutex);

	log_profiler.counters_index = 1;

	mono_counters_on_register (&counters_add_agent);
	mono_counters_foreach (counters_init_foreach_callback, NULL);
}
static void
counters_emit (void)
{
	MonoCounterAgent *agent;
	int len = 0;
	int size =
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* len */
	;

	mono_os_mutex_lock (&log_profiler.counters_mutex);

	for (agent = log_profiler.counters; agent; agent = agent->next) {
		if (agent->emitted)
			continue;

		size +=
			LEB128_SIZE /* section */ +
			strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
			BYTE_SIZE /* type */ +
			BYTE_SIZE /* unit */ +
			BYTE_SIZE /* variance */ +
			LEB128_SIZE /* index */
		;

		len++;
	}

	if (!len)
		goto done;

	ENTER_LOG (&counter_descriptors_ctr, logbuffer, size);

	emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
	emit_value (logbuffer, len);

	for (agent = log_profiler.counters; agent; agent = agent->next) {
		const char *name;

		if (agent->emitted)
			continue;

		name = mono_counter_get_name (agent->counter);
		emit_value (logbuffer, mono_counter_get_section (agent->counter));
		emit_string (logbuffer, name, strlen (name) + 1);
		emit_value (logbuffer, mono_counter_get_type (agent->counter));
		emit_value (logbuffer, mono_counter_get_unit (agent->counter));
		emit_value (logbuffer, mono_counter_get_variance (agent->counter));
		emit_value (logbuffer, agent->index);

		agent->emitted = TRUE;
	}

	EXIT_LOG;

done:
	mono_os_mutex_unlock (&log_profiler.counters_mutex);
}
static void
counters_sample (uint64_t timestamp)
{
	MonoCounterAgent *agent;
	MonoCounter *counter;
	int type;
	int buffer_size;
	void *buffer;
	int size;

	counters_emit ();

	buffer_size = 8;
	buffer = g_calloc (1, buffer_size);

	mono_os_mutex_lock (&log_profiler.counters_mutex);

	size =
		EVENT_SIZE /* event */
	;

	for (agent = log_profiler.counters; agent; agent = agent->next) {
		/*
		 * FIXME: This calculation is incorrect for string counters since
		 * mono_counter_get_size () just returns 0 in that case. We should
		 * address this if we ever actually add any string counters to Mono.
		 */
		size +=
			LEB128_SIZE /* index */ +
			BYTE_SIZE /* type */ +
			mono_counter_get_size (agent->counter) /* value */
		;
	}

	size +=
		LEB128_SIZE /* stop marker */
	;

	ENTER_LOG (&counter_samples_ctr, logbuffer, size);

	emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);

	for (agent = log_profiler.counters; agent; agent = agent->next) {
		size_t size;

		counter = agent->counter;

		size = mono_counter_get_size (counter);

		if (size > buffer_size) {
			buffer_size = size;
			buffer = g_realloc (buffer, buffer_size);
		}

		memset (buffer, 0, buffer_size);

		g_assert (mono_counters_sample (counter, buffer, size));

		type = mono_counter_get_type (counter);

		if (!agent->value) {
			agent->value = g_calloc (1, size);
			agent->value_size = size;
		} else {
			if (type == MONO_COUNTER_STRING) {
				if (strcmp (agent->value, buffer) == 0)
					continue;
			} else {
				if (agent->value_size == size && memcmp (agent->value, buffer, size) == 0)
					continue;
			}
		}

		emit_uvalue (logbuffer, agent->index);
		emit_value (logbuffer, type);

		switch (type) {
		case MONO_COUNTER_INT:
#if SIZEOF_VOID_P == 4
		case MONO_COUNTER_WORD:
#endif
			emit_svalue (logbuffer, *(int*)buffer - *(int*)agent->value);
			break;
		case MONO_COUNTER_UINT:
			emit_uvalue (logbuffer, *(guint*)buffer - *(guint*)agent->value);
			break;
		case MONO_COUNTER_TIME_INTERVAL:
		case MONO_COUNTER_LONG:
#if SIZEOF_VOID_P == 8
		case MONO_COUNTER_WORD:
#endif
			emit_svalue (logbuffer, *(gint64*)buffer - *(gint64*)agent->value);
			break;
		case MONO_COUNTER_ULONG:
			emit_uvalue (logbuffer, *(guint64*)buffer - *(guint64*)agent->value);
			break;
		case MONO_COUNTER_DOUBLE:
			emit_double (logbuffer, *(double*)buffer);
			break;
		case MONO_COUNTER_STRING:
			if (size == 0)
				emit_byte (logbuffer, 0);
			else {
				emit_byte (logbuffer, 1);
				emit_string (logbuffer, (char*)buffer, size);
			}
			break;
		default:
			g_assert_not_reached ();
		}

		if (type == MONO_COUNTER_STRING && size > agent->value_size) {
			agent->value = g_realloc (agent->value, size);
			agent->value_size = size;
		}

		if (size > 0)
			memcpy (agent->value, buffer, size);
	}

	g_free (buffer);

	emit_value (logbuffer, 0);

	EXIT_LOG;

	mono_os_mutex_unlock (&log_profiler.counters_mutex);
}
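
/*
 * Note that integer counters are emitted as deltas against the previously
 * sampled value stored in agent->value (while doubles and strings are
 * emitted verbatim), so a consumer of the log stream has to accumulate the
 * per-sample deltas to recover absolute counter values.
 */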
static void
perfcounters_emit (void)
{
	PerfCounterAgent *pcagent;
	int len = 0;
	int size =
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* len */
	;

	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->emitted)
			continue;

		size +=
			LEB128_SIZE /* section */ +
			strlen (pcagent->category_name) + 1 /* category name */ +
			strlen (pcagent->name) + 1 /* name */ +
			BYTE_SIZE /* type */ +
			BYTE_SIZE /* unit */ +
			BYTE_SIZE /* variance */ +
			LEB128_SIZE /* index */
		;

		len++;
	}

	if (!len)
		return;

	ENTER_LOG (&perfcounter_descriptors_ctr, logbuffer, size);

	emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
	emit_value (logbuffer, len);

	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->emitted)
			continue;

		emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
		emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
		emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
		emit_byte (logbuffer, MONO_COUNTER_LONG);
		emit_byte (logbuffer, MONO_COUNTER_RAW);
		emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
		emit_value (logbuffer, pcagent->index);

		pcagent->emitted = TRUE;
	}

	EXIT_LOG;
}
static gboolean
perfcounters_foreach (char *category_name, char *name, unsigned char type, gint64 value, gpointer user_data)
{
	PerfCounterAgent *pcagent;

	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (strcmp (pcagent->category_name, category_name) != 0 || strcmp (pcagent->name, name) != 0)
			continue;
		if (pcagent->value == value)
			return TRUE;

		pcagent->value = value;
		pcagent->updated = TRUE;
		pcagent->deleted = FALSE;
		return TRUE;
	}

	pcagent = g_new0 (PerfCounterAgent, 1);
	pcagent->next = log_profiler.perfcounters;
	pcagent->index = log_profiler.counters_index++;
	pcagent->category_name = g_strdup (category_name);
	pcagent->name = g_strdup (name);
	pcagent->value = value;
	pcagent->emitted = FALSE;
	pcagent->updated = TRUE;
	pcagent->deleted = FALSE;

	log_profiler.perfcounters = pcagent;

	return TRUE;
}
static void
perfcounters_sample (uint64_t timestamp)
{
	PerfCounterAgent *pcagent;
	int len = 0;
	int size;

	mono_os_mutex_lock (&log_profiler.counters_mutex);

	/* mark all perfcounters as deleted, foreach will unmark them as necessary */
	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next)
		pcagent->deleted = TRUE;

	mono_perfcounter_foreach (perfcounters_foreach, NULL);

	perfcounters_emit ();

	size =
		EVENT_SIZE /* event */
	;

	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->deleted || !pcagent->updated)
			continue;

		size +=
			LEB128_SIZE /* index */ +
			BYTE_SIZE /* type */ +
			LEB128_SIZE /* value */
		;

		len++;
	}

	if (!len)
		goto done;

	size +=
		LEB128_SIZE /* stop marker */
	;

	ENTER_LOG (&perfcounter_samples_ctr, logbuffer, size);

	emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);

	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->deleted || !pcagent->updated)
			continue;

		emit_uvalue (logbuffer, pcagent->index);
		emit_byte (logbuffer, MONO_COUNTER_LONG);
		emit_svalue (logbuffer, pcagent->value);

		pcagent->updated = FALSE;
	}

	emit_value (logbuffer, 0);

	EXIT_LOG;

done:
	mono_os_mutex_unlock (&log_profiler.counters_mutex);
}
static void
counters_and_perfcounters_sample (void)
{
	uint64_t now = current_time ();

	counters_sample (now);
	perfcounters_sample (now);
}
typedef struct {
	MonoLockFreeQueueNode node;
	MonoMethod *method;
} MethodNode;

typedef struct {
	int offset;
	int counter;
	char *filename;
	int line;
	int column;
} CoverageEntry;

static void
free_coverage_entry (gpointer data, gpointer userdata)
{
	CoverageEntry *entry = (CoverageEntry *)data;
	g_free (entry->filename);
	g_free (entry);
}
static void
obtain_coverage_for_method (MonoProfiler *prof, const MonoProfilerCoverageData *entry)
{
	int offset = entry->il_offset - log_profiler.coverage_previous_offset;
	CoverageEntry *e = g_new (CoverageEntry, 1);

	log_profiler.coverage_previous_offset = entry->il_offset;

	e->offset = offset;
	e->counter = entry->counter;
	e->filename = g_strdup (entry->file_name ? entry->file_name : "");
	e->line = entry->line;
	e->column = entry->column;

	g_ptr_array_add (log_profiler.coverage_data, e);
}
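
/*
 * IL offsets are delta-encoded: each CoverageEntry records its offset
 * relative to the previous entry's il_offset (tracked in
 * log_profiler.coverage_previous_offset and reset per method), which keeps
 * the LEB128 encoding of the offsets small.
 */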
static char *
parse_generic_type_names (char *name)
{
	char *new_name, *ret;
	int within_generic_declaration = 0, generic_members = 1;

	if (name == NULL || *name == '\0')
		return g_strdup ("");

	if (!(ret = new_name = (char *) g_calloc (strlen (name) * 4 + 1, sizeof (char))))
		return NULL;

	do {
		switch (*name) {
		case '<':
			within_generic_declaration = 1;
			break;
		case '>':
			within_generic_declaration = 0;

			if (*(name - 1) != '<') {
				*new_name++ = '`';
				*new_name++ = '0' + generic_members;
			} else {
				memcpy (new_name, "&lt;&gt;", 8);
				new_name += 8;
			}

			generic_members = 0;
			break;
		case ',':
			generic_members++;
			break;
		default:
			if (!within_generic_declaration)
				*new_name++ = *name;
			break;
		}
	} while (*name++);

	return ret;
}
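
/*
 * A worked example of the rewriting done above: a class name of the form
 * "Dictionary<TKey,TValue>" has its bracketed parameter list stripped and an
 * arity suffix appended, yielding "Dictionary`2" (note the single digit: this
 * simple scheme assumes at most 9 generic parameters). An empty list as in
 * "IEnumerable<>" is instead replaced by the literal "&lt;&gt;".
 */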
static void
build_method_buffer (gpointer key, gpointer value, gpointer userdata)
{
	MonoMethod *method = (MonoMethod *)value;
	MonoClass *klass;
	MonoImage *image;
	char *class_name;
	const char *image_name, *method_name, *sig, *first_filename;
	guint i;

	log_profiler.coverage_previous_offset = 0;
	log_profiler.coverage_data = g_ptr_array_new ();

	mono_profiler_get_coverage_data (log_profiler.handle, method, obtain_coverage_for_method);

	klass = mono_method_get_class (method);
	image = mono_class_get_image (klass);
	image_name = mono_image_get_name (image);

	sig = mono_signature_get_desc (mono_method_signature (method), TRUE);
	class_name = parse_generic_type_names (mono_type_get_name (mono_class_get_type (klass)));
	method_name = mono_method_get_name (method);

	if (log_profiler.coverage_data->len != 0) {
		CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata [0];
		first_filename = entry->filename ? entry->filename : "";
	} else
		first_filename = "";

	image_name = image_name ? image_name : "";
	sig = sig ? sig : "";
	method_name = method_name ? method_name : "";

	ENTER_LOG (&coverage_methods_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		strlen (image_name) + 1 /* image name */ +
		strlen (class_name) + 1 /* class name */ +
		strlen (method_name) + 1 /* method name */ +
		strlen (sig) + 1 /* signature */ +
		strlen (first_filename) + 1 /* first file name */ +
		LEB128_SIZE /* token */ +
		LEB128_SIZE /* method id */ +
		LEB128_SIZE /* entries */
	);

	emit_event (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
	emit_string (logbuffer, image_name, strlen (image_name) + 1);
	emit_string (logbuffer, class_name, strlen (class_name) + 1);
	emit_string (logbuffer, method_name, strlen (method_name) + 1);
	emit_string (logbuffer, sig, strlen (sig) + 1);
	emit_string (logbuffer, first_filename, strlen (first_filename) + 1);

	emit_uvalue (logbuffer, mono_method_get_token (method));
	emit_uvalue (logbuffer, log_profiler.coverage_method_id);
	emit_value (logbuffer, log_profiler.coverage_data->len);

	EXIT_LOG;

	for (i = 0; i < log_profiler.coverage_data->len; i++) {
		CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata [i];

		ENTER_LOG (&coverage_statements_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method id */ +
			LEB128_SIZE /* offset */ +
			LEB128_SIZE /* counter */ +
			LEB128_SIZE /* line */ +
			LEB128_SIZE /* column */
		);

		emit_event (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
		emit_uvalue (logbuffer, log_profiler.coverage_method_id);
		emit_uvalue (logbuffer, entry->offset);
		emit_uvalue (logbuffer, entry->counter);
		emit_uvalue (logbuffer, entry->line);
		emit_uvalue (logbuffer, entry->column);

		EXIT_LOG;
	}

	log_profiler.coverage_method_id++;

	g_free (class_name);

	g_ptr_array_foreach (log_profiler.coverage_data, free_coverage_entry, NULL);
	g_ptr_array_free (log_profiler.coverage_data, TRUE);
}
/* This empties the queue */
static guint
count_queue (MonoLockFreeQueue *queue)
{
	MonoLockFreeQueueNode *node;
	guint count = 0;

	while ((node = mono_lock_free_queue_dequeue (queue))) {
		count++;
		mono_thread_hazardous_try_free (node, g_free);
	}

	return count;
}
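
/*
 * Beware that count_queue () is destructive: counting "fully covered"
 * methods dequeues (and hazard-frees) every node, so each per-class and
 * per-image method queue can only be counted once, at dump time.
 */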
static void
build_class_buffer (gpointer key, gpointer value, gpointer userdata)
{
	MonoClass *klass = (MonoClass *)key;
	MonoLockFreeQueue *class_methods = (MonoLockFreeQueue *)value;
	MonoImage *image;
	char *class_name;
	const char *assembly_name;
	int number_of_methods, partially_covered;
	guint fully_covered;

	image = mono_class_get_image (klass);
	assembly_name = mono_image_get_name (image);
	class_name = mono_type_get_name (mono_class_get_type (klass));

	assembly_name = assembly_name ? assembly_name : "";
	number_of_methods = mono_class_num_methods (klass);
	fully_covered = count_queue (class_methods);
	/* We don't handle partially covered yet */
	partially_covered = 0;

	ENTER_LOG (&coverage_classes_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		strlen (assembly_name) + 1 /* assembly name */ +
		strlen (class_name) + 1 /* class name */ +
		LEB128_SIZE /* no. methods */ +
		LEB128_SIZE /* fully covered */ +
		LEB128_SIZE /* partially covered */
	);

	emit_event (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
	emit_string (logbuffer, assembly_name, strlen (assembly_name) + 1);
	emit_string (logbuffer, class_name, strlen (class_name) + 1);
	emit_uvalue (logbuffer, number_of_methods);
	emit_uvalue (logbuffer, fully_covered);
	emit_uvalue (logbuffer, partially_covered);

	EXIT_LOG;

	g_free (class_name);
}
static void
get_coverage_for_image (MonoImage *image, int *number_of_methods, guint *fully_covered, int *partially_covered)
{
	MonoLockFreeQueue *image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);

	*number_of_methods = mono_image_get_table_rows (image, MONO_TABLE_METHOD);

	if (image_methods)
		*fully_covered = count_queue (image_methods);
	else
		*fully_covered = 0;

	// FIXME: We don't handle partially covered yet.
	*partially_covered = 0;
}
static void
build_assembly_buffer (gpointer key, gpointer value, gpointer userdata)
{
	MonoAssembly *assembly = (MonoAssembly *)value;
	MonoImage *image = mono_assembly_get_image (assembly);
	const char *name, *guid, *filename;
	int number_of_methods = 0, partially_covered = 0;
	guint fully_covered = 0;

	name = mono_image_get_name (image);
	guid = mono_image_get_guid (image);
	filename = mono_image_get_filename (image);

	name = name ? name : "";
	guid = guid ? guid : "";
	filename = filename ? filename : "";

	get_coverage_for_image (image, &number_of_methods, &fully_covered, &partially_covered);

	ENTER_LOG (&coverage_assemblies_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		strlen (name) + 1 /* name */ +
		strlen (guid) + 1 /* guid */ +
		strlen (filename) + 1 /* file name */ +
		LEB128_SIZE /* no. methods */ +
		LEB128_SIZE /* fully covered */ +
		LEB128_SIZE /* partially covered */
	);

	emit_event (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
	emit_string (logbuffer, name, strlen (name) + 1);
	emit_string (logbuffer, guid, strlen (guid) + 1);
	emit_string (logbuffer, filename, strlen (filename) + 1);
	emit_uvalue (logbuffer, number_of_methods);
	emit_uvalue (logbuffer, fully_covered);
	emit_uvalue (logbuffer, partially_covered);

	EXIT_LOG;
}
static void
dump_coverage (void)
{
	mono_os_mutex_lock (&log_profiler.coverage_mutex);
	mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, build_assembly_buffer, NULL);
	mono_conc_hashtable_foreach (log_profiler.coverage_classes, build_class_buffer, NULL);
	mono_conc_hashtable_foreach (log_profiler.coverage_methods, build_method_buffer, NULL);
	mono_os_mutex_unlock (&log_profiler.coverage_mutex);
}
static MonoLockFreeQueueNode *
create_method_node (MonoMethod *method)
{
	MethodNode *node = (MethodNode *) g_malloc (sizeof (MethodNode));
	mono_lock_free_queue_node_init ((MonoLockFreeQueueNode *) node, FALSE);
	node->method = method;

	return (MonoLockFreeQueueNode *) node;
}
static gboolean
coverage_filter (MonoProfiler *prof, MonoMethod *method)
{
	MonoError error;
	MonoClass *klass;
	MonoImage *image;
	MonoAssembly *assembly;
	MonoMethodHeader *header;
	guint32 iflags, flags, code_size;
	char *fqn, *classname;
	gboolean has_positive, found;
	MonoLockFreeQueue *image_methods, *class_methods;
	MonoLockFreeQueueNode *node;

	flags = mono_method_get_flags (method, &iflags);
	if ((iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
	    (flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
		return FALSE;

	// Don't need to do anything else if we're already tracking this method
	if (mono_conc_hashtable_lookup (log_profiler.coverage_methods, method))
		return TRUE;

	klass = mono_method_get_class (method);
	image = mono_class_get_image (klass);

	// Don't handle coverage for the core assemblies
	if (mono_conc_hashtable_lookup (log_profiler.coverage_suppressed_assemblies, (gpointer) mono_image_get_name (image)) != NULL)
		return FALSE;

	if (prof->coverage_filters) {
		/* Check already filtered classes first */
		if (mono_conc_hashtable_lookup (log_profiler.coverage_filtered_classes, klass))
			return FALSE;

		classname = mono_type_get_name (mono_class_get_type (klass));

		fqn = g_strdup_printf ("[%s]%s", mono_image_get_name (image), classname);

		// Check positive filters first
		has_positive = FALSE;
		found = FALSE;
		for (guint i = 0; i < prof->coverage_filters->len; ++i) {
			char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);

			if (filter [0] == '+') {
				filter = &filter [1];

				if (strstr (fqn, filter) != NULL)
					found = TRUE;

				has_positive = TRUE;
			}
		}

		if (has_positive && !found) {
			mono_os_mutex_lock (&log_profiler.coverage_mutex);
			mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
			mono_os_mutex_unlock (&log_profiler.coverage_mutex);
			g_free (fqn);
			g_free (classname);

			return FALSE;
		}

		for (guint i = 0; i < prof->coverage_filters->len; ++i) {
			// FIXME: Is substring search sufficient?
			char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
			if (filter [0] == '+')
				continue;

			// Skip the leading '-'
			filter = &filter [1];

			if (strstr (fqn, filter) != NULL) {
				mono_os_mutex_lock (&log_profiler.coverage_mutex);
				mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
				mono_os_mutex_unlock (&log_profiler.coverage_mutex);
				g_free (fqn);
				g_free (classname);

				return FALSE;
			}
		}

		g_free (fqn);
		g_free (classname);
	}

	header = mono_method_get_header_checked (method, &error);
	mono_error_cleanup (&error);

	mono_method_header_get_code (header, &code_size, NULL);

	assembly = mono_image_get_assembly (image);

	// Need to keep the assemblies around for as long as they are kept in the hashtable.
	// NUnit, for example, has a habit of unloading them before the coverage statistics are
	// generated, causing a crash. See https://bugzilla.xamarin.com/show_bug.cgi?id=39325
	mono_assembly_addref (assembly);

	mono_os_mutex_lock (&log_profiler.coverage_mutex);
	mono_conc_hashtable_insert (log_profiler.coverage_methods, method, method);
	mono_conc_hashtable_insert (log_profiler.coverage_assemblies, assembly, assembly);
	mono_os_mutex_unlock (&log_profiler.coverage_mutex);

	image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);

	if (image_methods == NULL) {
		image_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
		mono_lock_free_queue_init (image_methods);
		mono_os_mutex_lock (&log_profiler.coverage_mutex);
		mono_conc_hashtable_insert (log_profiler.coverage_image_to_methods, image, image_methods);
		mono_os_mutex_unlock (&log_profiler.coverage_mutex);
	}

	node = create_method_node (method);
	mono_lock_free_queue_enqueue (image_methods, node);

	class_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_classes, klass);

	if (class_methods == NULL) {
		class_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
		mono_lock_free_queue_init (class_methods);
		mono_os_mutex_lock (&log_profiler.coverage_mutex);
		mono_conc_hashtable_insert (log_profiler.coverage_classes, klass, class_methods);
		mono_os_mutex_unlock (&log_profiler.coverage_mutex);
	}

	node = create_method_node (method);
	mono_lock_free_queue_enqueue (class_methods, node);

	return TRUE;
}
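
/*
 * Filter semantics, as implemented above: each filter string starts with '+'
 * (include) or '-' (exclude) and is matched as a plain substring against the
 * "[image]class" fully qualified name. If any positive filters exist, a
 * class must match one of them to be covered; matching a negative filter
 * always excludes the class. For example, the (hypothetical) filter set
 * {"+[MyApp]", "-[MyApp]MyApp.Tests"} covers MyApp except its Tests
 * namespace.
 */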
#define LINE_BUFFER_SIZE 4096
/* Max file limit of 128 KB */
#define MAX_FILE_SIZE 128 * 1024
static char *
get_file_content (FILE *stream)
{
	char *buffer;
	ssize_t bytes_read;
	long filesize;
	int res, offset = 0;

	res = fseek (stream, 0, SEEK_END);
	if (res < 0)
		return NULL;

	filesize = ftell (stream);
	if (filesize < 0)
		return NULL;

	res = fseek (stream, 0, SEEK_SET);
	if (res < 0)
		return NULL;

	if (filesize > MAX_FILE_SIZE)
		return NULL;

	buffer = (char *) g_malloc ((filesize + 1) * sizeof (char));
	while ((bytes_read = fread (buffer + offset, 1, LINE_BUFFER_SIZE, stream)) > 0)
		offset += bytes_read;

	/* NUL-terminate our buffer */
	buffer [filesize] = '\0';
	return buffer;
}
static char *
get_next_line (char *contents, char **next_start)
{
	char *p = contents;

	if (p == NULL || *p == '\0') {
		*next_start = NULL;
		return NULL;
	}

	while (*p != '\n' && *p != '\0')
		p++;

	if (*p == '\n') {
		*p = '\0';
		*next_start = p + 1;
	} else
		*next_start = NULL;

	return contents;
}
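
/*
 * get_next_line () tokenizes the buffer in place: it NUL-terminates the
 * current line (replacing the '\n') and advances *next_start past it, so
 * callers can loop with while ((line = get_next_line (content, &content)))
 * and keep the returned pointers alive as long as the backing buffer lives.
 */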
static void
init_suppressed_assemblies (void)
{
	char *content;
	char *line;
	FILE *sa_file;

	log_profiler.coverage_suppressed_assemblies = mono_conc_hashtable_new (g_str_hash, g_str_equal);
	sa_file = fopen (SUPPRESSION_DIR "/mono-profiler-log.suppression", "r");
	if (sa_file == NULL)
		return;

	/* Don't need to free @content as it is referred to by the lines stored in @suppressed_assemblies */
	content = get_file_content (sa_file);
	if (content == NULL)
		g_error ("mono-profiler-log.suppression is larger than 128 KB - aborting.");

	while ((line = get_next_line (content, &content))) {
		line = g_strchomp (g_strchug (line));
		/* No locking needed as we're doing initialization */
		mono_conc_hashtable_insert (log_profiler.coverage_suppressed_assemblies, line, line);
	}

	fclose (sa_file);
}
static void
parse_cov_filter_file (GPtrArray *filters, const char *file)
{
	FILE *filter_file = fopen (file, "r");

	if (filter_file == NULL) {
		mono_profiler_printf_err ("Could not open coverage filter file '%s'.", file);
		return;
	}

	/* Don't need to free @content as it is referred to by the lines stored in @filters */
	char *content = get_file_content (filter_file);

	if (content == NULL)
		mono_profiler_printf_err ("Coverage filter file '%s' is larger than 128 KB - ignoring.", file);

	char *line;

	while ((line = get_next_line (content, &content)))
		g_ptr_array_add (filters, g_strchug (g_strchomp (line)));

	fclose (filter_file);
}
static void
coverage_init (void)
{
	mono_os_mutex_init (&log_profiler.coverage_mutex);
	log_profiler.coverage_methods = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_assemblies = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_classes = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_filtered_classes = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_image_to_methods = mono_conc_hashtable_new (NULL, NULL);
	init_suppressed_assemblies ();
}
static void
unref_coverage_assemblies (gpointer key, gpointer value, gpointer userdata)
{
	MonoAssembly *assembly = (MonoAssembly *)value;
	mono_assembly_close (assembly);
}

static void
free_sample_hit (gpointer p)
{
	mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
}
static void
cleanup_reusable_samples (void)
{
	SampleHit *sample;

	while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.sample_reuse_queue)))
		mono_thread_hazardous_try_free (sample, free_sample_hit);
}
static void
log_early_shutdown (MonoProfiler *prof)
{
	if (log_config.hs_on_shutdown) {
		mono_atomic_store_i32 (&log_profiler.heapshot_requested, 1);
		mono_gc_collect (mono_gc_max_generation ());
	}
}
static void
log_shutdown (MonoProfiler *prof)
{
	mono_atomic_store_i32 (&log_profiler.in_shutdown, 1);

	if (ENABLED (PROFLOG_COUNTER_EVENTS))
		counters_and_perfcounters_sample ();

	if (log_config.collect_coverage)
		dump_coverage ();

	char c = 1;

	if (write (prof->pipes [1], &c, 1) != 1) {
		mono_profiler_printf_err ("Could not write to log profiler pipe: %s", g_strerror (errno));
		exit (1);
	}

	mono_native_thread_join (prof->helper_thread);

	mono_os_mutex_destroy (&log_profiler.counters_mutex);

	MonoCounterAgent *mc_next;

	for (MonoCounterAgent *cur = log_profiler.counters; cur; cur = mc_next) {
		mc_next = cur->next;
		g_free (cur);
	}

	PerfCounterAgent *pc_next;

	for (PerfCounterAgent *cur = log_profiler.perfcounters; cur; cur = pc_next) {
		pc_next = cur->next;
		g_free (cur);
	}

	/*
	 * Ensure that we empty the LLS completely, even if some nodes are
	 * not immediately removed upon calling mono_lls_remove (), by
	 * iterating until the head is NULL.
	 */
	while (log_profiler.profiler_thread_list.head) {
		MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
			g_assert (thread->attached && "Why is a thread in the LLS not attached?");

			remove_thread (thread);
		} MONO_LLS_FOREACH_SAFE_END
	}

	/*
	 * Ensure that all threads have been freed, so that we don't miss any
	 * buffers when we shut down the writer thread below.
	 */
	mono_thread_hazardous_try_free_all ();

	mono_atomic_store_i32 (&prof->run_dumper_thread, 0);
	mono_os_sem_post (&prof->dumper_queue_sem);
	mono_native_thread_join (prof->dumper_thread);
	mono_os_sem_destroy (&prof->dumper_queue_sem);

	mono_atomic_store_i32 (&prof->run_writer_thread, 0);
	mono_os_sem_post (&prof->writer_queue_sem);
	mono_native_thread_join (prof->writer_thread);
	mono_os_sem_destroy (&prof->writer_queue_sem);

	/*
	 * Free all writer queue entries, and ensure that all sample hits will be
	 * added to the sample reuse queue.
	 */
	mono_thread_hazardous_try_free_all ();

	cleanup_reusable_samples ();

	/*
	 * Finally, make sure that all sample hits are freed. This should cover all
	 * hazardous data from the profiler. We can now be sure that the runtime
	 * won't later invoke free functions in the profiler library after it has
	 * been unloaded.
	 */
	mono_thread_hazardous_try_free_all ();

	gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);

	g_assert (!(state & 0xFFFF) && "Why is the reader count still non-zero?");
	g_assert (!(state >> 16) && "Why is the exclusive lock still held?");

#if defined (HAVE_SYS_ZLIB)
	if (prof->gzfile)
		gzclose (prof->gzfile);
#endif
	if (prof->pipe_output)
		pclose (prof->file);
	else
		fclose (prof->file);

	mono_conc_hashtable_destroy (prof->method_table);
	mono_os_mutex_destroy (&prof->method_table_mutex);

	if (log_config.collect_coverage) {
		mono_os_mutex_lock (&log_profiler.coverage_mutex);
		mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, unref_coverage_assemblies, NULL);
		mono_os_mutex_unlock (&log_profiler.coverage_mutex);

		mono_conc_hashtable_destroy (log_profiler.coverage_methods);
		mono_conc_hashtable_destroy (log_profiler.coverage_assemblies);
		mono_conc_hashtable_destroy (log_profiler.coverage_classes);
		mono_conc_hashtable_destroy (log_profiler.coverage_filtered_classes);

		mono_conc_hashtable_destroy (log_profiler.coverage_image_to_methods);
		mono_conc_hashtable_destroy (log_profiler.coverage_suppressed_assemblies);
		mono_os_mutex_destroy (&log_profiler.coverage_mutex);
	}

	mono_coop_mutex_destroy (&log_profiler.api_mutex);

	g_free (prof->args);
}
static char*
new_filename (const char* filename)
{
	time_t t = time (NULL);
	int pid = process_id ();
	char pid_buf [16];
	char time_buf [16];
	char *res, *d;
	const char *p;
	int count_dates = 0;
	int count_pids = 0;
	int s_date, s_pid;
	struct tm *ts;

	for (p = filename; *p; p++) {
		if (*p != '%')
			continue;
		p++;
		if (*p == 't')
			count_dates++;
		else if (*p == 'p')
			count_pids++;
		else if (*p == 0)
			break;
	}
	if (!count_dates && !count_pids)
		return pstrdup (filename);
	snprintf (pid_buf, sizeof (pid_buf), "%d", pid);
	ts = gmtime (&t);
	snprintf (time_buf, sizeof (time_buf), "%d%02d%02d%02d%02d%02d",
		1900 + ts->tm_year, 1 + ts->tm_mon, ts->tm_mday, ts->tm_hour, ts->tm_min, ts->tm_sec);
	s_date = strlen (time_buf);
	s_pid = strlen (pid_buf);
	d = res = (char *) g_malloc (strlen (filename) + s_date * count_dates + s_pid * count_pids);
	for (p = filename; *p; p++) {
		if (*p != '%') {
			*d++ = *p;
			continue;
		}
		p++;
		if (*p == 't') {
			strcpy (d, time_buf);
			d += s_date;
			continue;
		} else if (*p == 'p') {
			strcpy (d, pid_buf);
			d += s_pid;
			continue;
		} else if (*p == '%') {
			*d++ = '%';
			continue;
		} else if (*p == 0)
			break;
		*d++ = '%';
		*d++ = *p;
	}
	*d = 0;
	return res;
}
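
/*
 * To summarize the substitutions implemented above: "%t" expands to a UTC
 * timestamp of the form YYYYMMDDhhmmss, "%p" to the process id, and "%%" to
 * a literal '%'. For example (hypothetical file name), "output-%p.mlpd"
 * would become "output-1234.mlpd" for pid 1234.
 */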
static void
add_to_fd_set (fd_set *set, int fd, int *max_fd)
{
	/*
	 * This should only trigger for the basic FDs (server socket, pipes) at
	 * startup if for some mysterious reason they're too large. In this case,
	 * the profiler really can't function, and we're better off printing an
	 * error and exiting.
	 */
	if (fd >= FD_SETSIZE) {
		mono_profiler_printf_err ("File descriptor is out of bounds for fd_set: %d", fd);
		exit (1);
	}

	FD_SET (fd, set);

	if (*max_fd < fd)
		*max_fd = fd;
}
)
3833 mono_threads_attach_tools_thread ();
3834 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
3836 MonoProfilerThread
*thread
= init_thread (FALSE
);
3838 GArray
*command_sockets
= g_array_new (FALSE
, FALSE
, sizeof (int));
3846 add_to_fd_set (&rfds
, log_profiler
.server_socket
, &max_fd
);
3847 add_to_fd_set (&rfds
, log_profiler
.pipes
[0], &max_fd
);
3849 for (gint i
= 0; i
< command_sockets
->len
; i
++)
3850 add_to_fd_set (&rfds
, g_array_index (command_sockets
, int, i
), &max_fd
);
3852 struct timeval tv
= { .tv_sec
= 1, .tv_usec
= 0 };
3854 // Sleep for 1sec or until a file descriptor has data.
3855 if (select (max_fd
+ 1, &rfds
, NULL
, NULL
, &tv
) == -1) {
3859 mono_profiler_printf_err ("Could not poll in log profiler helper thread: %s", g_strerror (errno
));
3863 if (ENABLED (PROFLOG_COUNTER_EVENTS
))
3864 counters_and_perfcounters_sample ();
3866 buffer_lock_excl ();
3868 sync_point (SYNC_POINT_PERIODIC
);
3870 buffer_unlock_excl ();
3872 // Are we shutting down?
3873 if (FD_ISSET (log_profiler
.pipes
[0], &rfds
)) {
3875 read (log_profiler
.pipes
[0], &c
, 1);
3879 for (gint i
= 0; i
< command_sockets
->len
; i
++) {
3880 int fd
= g_array_index (command_sockets
, int, i
);
3882 if (!FD_ISSET (fd
, &rfds
))
3886 int len
= read (fd
, buf
, sizeof (buf
) - 1);
3892 // The other end disconnected.
3893 g_array_remove_index (command_sockets
, i
);
3901 if (!strcmp (buf
, "heapshot\n"))
3902 trigger_heapshot ();
3905 if (FD_ISSET (log_profiler
.server_socket
, &rfds
)) {
3906 int fd
= accept (log_profiler
.server_socket
, NULL
, NULL
);
3909 if (fd
>= FD_SETSIZE
)
3912 g_array_append_val (command_sockets
, fd
);
3917 for (gint i
= 0; i
< command_sockets
->len
; i
++)
3918 close (g_array_index (command_sockets
, int, i
));
3920 g_array_free (command_sockets
, TRUE
);
3922 send_log_unsafe (FALSE
);
3923 deinit_thread (thread
);
3925 mono_thread_info_detach ();
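
/*
 * The helper thread doubles as a TCP command server. A sketch of how a
 * client might trigger a heap shot, assuming the assigned command port is
 * 50000:
 *
 *     $ echo heapshot | nc 127.0.0.1 50000
 *
 * Each connection may send newline-terminated commands; "heapshot\n" is the
 * only command recognized here.
 */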
static void
start_helper_thread (void)
{
	if (pipe (log_profiler.pipes) == -1) {
		mono_profiler_printf_err ("Could not create log profiler pipe: %s", g_strerror (errno));
		exit (1);
	}

	log_profiler.server_socket = socket (PF_INET, SOCK_STREAM, 0);

	if (log_profiler.server_socket == -1) {
		mono_profiler_printf_err ("Could not create log profiler server socket: %s", g_strerror (errno));
		exit (1);
	}

	struct sockaddr_in server_address;

	memset (&server_address, 0, sizeof (server_address));
	server_address.sin_family = AF_INET;
	server_address.sin_addr.s_addr = INADDR_ANY;
	server_address.sin_port = htons (log_profiler.command_port);

	if (bind (log_profiler.server_socket, (struct sockaddr *) &server_address, sizeof (server_address)) == -1) {
		mono_profiler_printf_err ("Could not bind log profiler server socket on port %d: %s", log_profiler.command_port, g_strerror (errno));
		close (log_profiler.server_socket);
		exit (1);
	}

	if (listen (log_profiler.server_socket, 1) == -1) {
		mono_profiler_printf_err ("Could not listen on log profiler server socket: %s", g_strerror (errno));
		close (log_profiler.server_socket);
		exit (1);
	}

	socklen_t slen = sizeof (server_address);

	if (getsockname (log_profiler.server_socket, (struct sockaddr *) &server_address, &slen)) {
		mono_profiler_printf_err ("Could not retrieve assigned port for log profiler server socket: %s", g_strerror (errno));
		close (log_profiler.server_socket);
		exit (1);
	}

	log_profiler.command_port = ntohs (server_address.sin_port);

	if (!mono_native_thread_create (&log_profiler.helper_thread, helper_thread, NULL)) {
		mono_profiler_printf_err ("Could not start log profiler helper thread");
		close (log_profiler.server_socket);
		exit (1);
	}
}
static void
free_writer_entry (gpointer p)
{
	mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
}
static gboolean
handle_writer_queue_entry (void)
{
	WriterQueueEntry *entry;

	if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&log_profiler.writer_queue))) {
		if (!entry->methods)
			goto no_methods;

		gboolean wrote_methods = FALSE;

		/*
		 * Encode the method events in a temporary log buffer that we
		 * flush to disk before the main buffer, ensuring that all
		 * methods have metadata emitted before they're referenced.
		 *
		 * We use a 'proper' thread-local buffer for this as opposed
		 * to allocating and freeing a buffer by hand because the call
		 * to mono_method_full_name () below may trigger class load
		 * events when it retrieves the signature of the method. So a
		 * thread-local buffer needs to exist when such events occur.
		 */
		for (guint i = 0; i < entry->methods->len; i++) {
			MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);

			if (mono_conc_hashtable_lookup (log_profiler.method_table, info->method))
				goto free_info; // This method already has metadata emitted.

			/*
			 * Other threads use this hash table to get a general
			 * idea of whether a method has already been emitted to
			 * the stream. Due to the way we add to this table, it
			 * can easily happen that multiple threads queue up the
			 * same methods, but that's OK since eventually all
			 * methods will be in this table and the thread-local
			 * method lists will just be empty for the rest of the
			 * app's lifetime.
			 */
			mono_os_mutex_lock (&log_profiler.method_table_mutex);
			mono_conc_hashtable_insert (log_profiler.method_table, info->method, info->method);
			mono_os_mutex_unlock (&log_profiler.method_table_mutex);

			char *name = mono_method_full_name (info->method, 1);
			int nlen = strlen (name) + 1;
			void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
			int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;

			ENTER_LOG (&method_jits_ctr, logbuffer,
				EVENT_SIZE /* event */ +
				LEB128_SIZE /* method */ +
				LEB128_SIZE /* start */ +
				LEB128_SIZE /* size */ +
				nlen /* name */
			);

			emit_event_time (logbuffer, TYPE_JIT | TYPE_METHOD, info->time);
			emit_method_inner (logbuffer, info->method);
			emit_ptr (logbuffer, cstart);
			emit_value (logbuffer, csize);

			memcpy (logbuffer->cursor, name, nlen);
			logbuffer->cursor += nlen;

			EXIT_LOG_EXPLICIT (NO_SEND);

			mono_free (name);

			wrote_methods = TRUE;

		free_info:
			g_free (info);
		}

		g_ptr_array_free (entry->methods, TRUE);

		if (wrote_methods) {
			MonoProfilerThread *thread = get_thread ();

			dump_buffer_threadless (thread->buffer);
			init_buffer_state (thread);
		}

	no_methods:
		dump_buffer (entry->buffer);

		mono_thread_hazardous_try_free (entry, free_writer_entry);

		return TRUE;
	}

	return FALSE;
}
static void *
writer_thread (void *arg)
{
	mono_threads_attach_tools_thread ();
	mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");

	dump_header ();

	MonoProfilerThread *thread = init_thread (FALSE);

	while (mono_atomic_load_i32 (&log_profiler.run_writer_thread)) {
		mono_os_sem_wait (&log_profiler.writer_queue_sem, MONO_SEM_FLAGS_NONE);
		handle_writer_queue_entry ();
	}

	/* Drain any remaining entries on shutdown. */
	while (handle_writer_queue_entry ());

	free_buffer (thread->buffer, thread->buffer->size);
	deinit_thread (thread);

	mono_thread_info_detach ();

	return NULL;
}
static void
start_writer_thread (void)
{
	mono_atomic_store_i32 (&log_profiler.run_writer_thread, 1);

	if (!mono_native_thread_create (&log_profiler.writer_thread, writer_thread, NULL)) {
		mono_profiler_printf_err ("Could not start log profiler writer thread");
		exit (1);
	}
}
static void
reuse_sample_hit (gpointer p)
{
	SampleHit *sample = p;

	mono_lock_free_queue_node_unpoison (&sample->node);
	mono_lock_free_queue_enqueue (&log_profiler.sample_reuse_queue, &sample->node);
}
static gboolean
handle_dumper_queue_entry (void)
{
	SampleHit *sample;

	if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.dumper_queue))) {
		for (int i = 0; i < sample->count; ++i) {
			MonoMethod *method = sample->frames [i].method;
			MonoDomain *domain = sample->frames [i].domain;
			void *address = sample->frames [i].base_address;

			if (!method) {
				g_assert (domain && "What happened to the domain pointer?");
				g_assert (address && "What happened to the instruction pointer?");

				MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *) address);

				if (ji)
					sample->frames [i].method = mono_jit_info_get_method (ji);
			}
		}

		ENTER_LOG (&sample_hits_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* tid */ +
			LEB128_SIZE /* count */ +
			1 * (
				LEB128_SIZE /* ip */
			) +
			LEB128_SIZE /* managed count */ +
			sample->count * (
				LEB128_SIZE /* method */
			)
		);

		emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
		emit_ptr (logbuffer, (void *) sample->tid);
		emit_value (logbuffer, 1);

		// TODO: Actual native unwinding.
		for (int i = 0; i < 1; ++i) {
			emit_ptr (logbuffer, sample->ip);
			add_code_pointer ((uintptr_t) sample->ip);
		}

		/* new in data version 6 */
		emit_uvalue (logbuffer, sample->count);

		for (int i = 0; i < sample->count; ++i)
			emit_method (logbuffer, sample->frames [i].method);

		EXIT_LOG;

		mono_thread_hazardous_try_free (sample, reuse_sample_hit);

		dump_unmanaged_coderefs ();

		return TRUE;
	}

	return FALSE;
}
static void *
dumper_thread (void *arg)
{
	mono_threads_attach_tools_thread ();
	mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");

	MonoProfilerThread *thread = init_thread (FALSE);

	while (mono_atomic_load_i32 (&log_profiler.run_dumper_thread)) {
		/*
		 * Flush samples every second so it doesn't seem like the profiler is
		 * not working if the program is mostly idle.
		 */
		if (mono_os_sem_timedwait (&log_profiler.dumper_queue_sem, 1000, MONO_SEM_FLAGS_NONE) == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT)
			send_log_unsafe (FALSE);

		handle_dumper_queue_entry ();
	}

	/* Drain any remaining entries on shutdown. */
	while (handle_dumper_queue_entry ());

	send_log_unsafe (FALSE);
	deinit_thread (thread);

	mono_thread_info_detach ();

	return NULL;
}
static void
start_dumper_thread (void)
{
	mono_atomic_store_i32 (&log_profiler.run_dumper_thread, 1);

	if (!mono_native_thread_create (&log_profiler.dumper_thread, dumper_thread, NULL)) {
		mono_profiler_printf_err ("Could not start log profiler dumper thread");
		exit (1);
	}
}
static void
register_counter (const char *name, gint32 *counter)
{
	mono_counters_register (name, MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, counter);
}
ICALL_EXPORT gint32
proflog_icall_GetMaxStackTraceFrames (void)
{
	return MAX_FRAMES;
}

ICALL_EXPORT gint32
proflog_icall_GetStackTraceFrames (void)
{
	return log_config.num_frames;
}

ICALL_EXPORT void
proflog_icall_SetStackTraceFrames (gint32 value)
{
	log_config.num_frames = value;
}
4253 proflog_icall_GetHeapshotMode (void)
4255 return log_config
.hs_mode
;
4259 proflog_icall_SetHeapshotMode (MonoProfilerHeapshotMode value
)
4261 log_config
.hs_mode
= value
;
4265 proflog_icall_GetHeapshotMillisecondsFrequency (void)
4267 return log_config
.hs_freq_ms
;
4271 proflog_icall_SetHeapshotMillisecondsFrequency (gint32 value
)
4273 log_config
.hs_freq_ms
= value
;
4277 proflog_icall_GetHeapshotCollectionsFrequency (void)
4279 return log_config
.hs_freq_gc
;
4283 proflog_icall_SetHeapshotCollectionsFrequency (gint32 value
)
4285 log_config
.hs_freq_gc
= value
;
4289 proflog_icall_TriggerHeapshot (void)
4291 trigger_heapshot ();
4295 proflog_icall_GetCallDepth (void)
4297 return log_config
.max_call_depth
;
4301 proflog_icall_SetCallDepth (gint32 value
)
4303 log_config
.max_call_depth
= value
;
4307 proflog_icall_GetSampleMode (MonoProfilerSampleMode
*mode
, gint32
*frequency
)
4311 mono_profiler_get_sample_mode (log_profiler
.handle
, mode
, &freq
);
4316 ICALL_EXPORT MonoBoolean
4317 proflog_icall_SetSampleMode (MonoProfilerSampleMode mode
, gint32 frequency
)
4319 mono_coop_mutex_lock (&log_profiler
.api_mutex
);
4321 mono_bool result
= mono_profiler_set_sample_mode (log_profiler
.handle
, mode
, frequency
);
4323 if (mode
!= MONO_PROFILER_SAMPLE_MODE_NONE
) {
4324 ENABLE (PROFLOG_SAMPLE_EVENTS
);
4325 mono_profiler_set_sample_hit_callback (log_profiler
.handle
, mono_sample_hit
);
4327 DISABLE (PROFLOG_SAMPLE_EVENTS
);
4328 mono_profiler_set_sample_hit_callback (log_profiler
.handle
, NULL
);
4331 mono_coop_mutex_unlock (&log_profiler
.api_mutex
);
ICALL_EXPORT MonoBoolean
proflog_icall_GetExceptionEvents (void)
{
	return ENABLED (PROFLOG_EXCEPTION_EVENTS);
}

ICALL_EXPORT void
proflog_icall_SetExceptionEvents (MonoBoolean value)
{
	mono_coop_mutex_lock (&log_profiler.api_mutex);

	if (value) {
		ENABLE (PROFLOG_EXCEPTION_EVENTS);
		mono_profiler_set_exception_throw_callback (log_profiler.handle, throw_exc);
		mono_profiler_set_exception_clause_callback (log_profiler.handle, clause_exc);
	} else {
		DISABLE (PROFLOG_EXCEPTION_EVENTS);
		mono_profiler_set_exception_throw_callback (log_profiler.handle, NULL);
		mono_profiler_set_exception_clause_callback (log_profiler.handle, NULL);
	}

	mono_coop_mutex_unlock (&log_profiler.api_mutex);
}

ICALL_EXPORT MonoBoolean
proflog_icall_GetMonitorEvents (void)
{
	return ENABLED (PROFLOG_MONITOR_EVENTS);
}

ICALL_EXPORT void
proflog_icall_SetMonitorEvents (MonoBoolean value)
{
	mono_coop_mutex_lock (&log_profiler.api_mutex);

	if (value) {
		ENABLE (PROFLOG_MONITOR_EVENTS);
		mono_profiler_set_monitor_contention_callback (log_profiler.handle, monitor_contention);
		mono_profiler_set_monitor_acquired_callback (log_profiler.handle, monitor_acquired);
		mono_profiler_set_monitor_failed_callback (log_profiler.handle, monitor_failed);
	} else {
		DISABLE (PROFLOG_MONITOR_EVENTS);
		mono_profiler_set_monitor_contention_callback (log_profiler.handle, NULL);
		mono_profiler_set_monitor_acquired_callback (log_profiler.handle, NULL);
		mono_profiler_set_monitor_failed_callback (log_profiler.handle, NULL);
	}

	mono_coop_mutex_unlock (&log_profiler.api_mutex);
}
ICALL_EXPORT MonoBoolean
proflog_icall_GetGCEvents (void)
{
	return ENABLED (PROFLOG_GC_EVENTS);
}

ICALL_EXPORT void
proflog_icall_SetGCEvents (MonoBoolean value)
{
	mono_coop_mutex_lock (&log_profiler.api_mutex);

	if (value)
		ENABLE (PROFLOG_GC_EVENTS);
	else
		DISABLE (PROFLOG_GC_EVENTS);

	mono_coop_mutex_unlock (&log_profiler.api_mutex);
}

ICALL_EXPORT MonoBoolean
proflog_icall_GetGCAllocationEvents (void)
{
	return ENABLED (PROFLOG_GC_ALLOCATION_EVENTS);
}

ICALL_EXPORT void
proflog_icall_SetGCAllocationEvents (MonoBoolean value)
{
	mono_coop_mutex_lock (&log_profiler.api_mutex);

	if (value) {
		ENABLE (PROFLOG_GC_ALLOCATION_EVENTS);
		mono_profiler_set_gc_allocation_callback (log_profiler.handle, gc_alloc);
	} else {
		DISABLE (PROFLOG_GC_ALLOCATION_EVENTS);
		mono_profiler_set_gc_allocation_callback (log_profiler.handle, NULL);
	}

	mono_coop_mutex_unlock (&log_profiler.api_mutex);
}

ICALL_EXPORT MonoBoolean
proflog_icall_GetGCMoveEvents (void)
{
	return ENABLED (PROFLOG_GC_MOVE_EVENTS);
}

ICALL_EXPORT void
proflog_icall_SetGCMoveEvents (MonoBoolean value)
{
	mono_coop_mutex_lock (&log_profiler.api_mutex);

	if (value) {
		ENABLE (PROFLOG_GC_MOVE_EVENTS);
		mono_profiler_set_gc_moves_callback (log_profiler.handle, gc_moves);
	} else {
		DISABLE (PROFLOG_GC_MOVE_EVENTS);
		mono_profiler_set_gc_moves_callback (log_profiler.handle, NULL);
	}

	mono_coop_mutex_unlock (&log_profiler.api_mutex);
}

ICALL_EXPORT MonoBoolean
proflog_icall_GetGCRootEvents (void)
{
	return ENABLED (PROFLOG_GC_ROOT_EVENTS);
}

ICALL_EXPORT void
proflog_icall_SetGCRootEvents (MonoBoolean value)
{
	mono_coop_mutex_lock (&log_profiler.api_mutex);

	if (value)
		ENABLE (PROFLOG_GC_ROOT_EVENTS);
	else
		DISABLE (PROFLOG_GC_ROOT_EVENTS);

	mono_coop_mutex_unlock (&log_profiler.api_mutex);
}
ICALL_EXPORT MonoBoolean
proflog_icall_GetGCHandleEvents (void)
{
	return ENABLED (PROFLOG_GC_HANDLE_EVENTS);
}

ICALL_EXPORT void
proflog_icall_SetGCHandleEvents (MonoBoolean value)
{
	mono_coop_mutex_lock (&log_profiler.api_mutex);

	if (value) {
		ENABLE (PROFLOG_GC_HANDLE_EVENTS);
		mono_profiler_set_gc_handle_created_callback (log_profiler.handle, gc_handle_created);
		mono_profiler_set_gc_handle_deleted_callback (log_profiler.handle, gc_handle_deleted);
	} else {
		DISABLE (PROFLOG_GC_HANDLE_EVENTS);
		mono_profiler_set_gc_handle_created_callback (log_profiler.handle, NULL);
		mono_profiler_set_gc_handle_deleted_callback (log_profiler.handle, NULL);
	}

	mono_coop_mutex_unlock (&log_profiler.api_mutex);
}

ICALL_EXPORT MonoBoolean
proflog_icall_GetGCFinalizationEvents (void)
{
	return ENABLED (PROFLOG_GC_FINALIZATION_EVENTS);
}

ICALL_EXPORT void
proflog_icall_SetGCFinalizationEvents (MonoBoolean value)
{
	mono_coop_mutex_lock (&log_profiler.api_mutex);

	if (value) {
		ENABLE (PROFLOG_GC_FINALIZATION_EVENTS);
		mono_profiler_set_gc_finalizing_callback (log_profiler.handle, finalize_begin);
		mono_profiler_set_gc_finalizing_object_callback (log_profiler.handle, finalize_object_begin);
		mono_profiler_set_gc_finalized_object_callback (log_profiler.handle, finalize_object_end);
	} else {
		DISABLE (PROFLOG_GC_FINALIZATION_EVENTS);
		mono_profiler_set_gc_finalizing_callback (log_profiler.handle, NULL);
		mono_profiler_set_gc_finalizing_object_callback (log_profiler.handle, NULL);
		mono_profiler_set_gc_finalized_object_callback (log_profiler.handle, NULL);
	}

	mono_coop_mutex_unlock (&log_profiler.api_mutex);
}
ICALL_EXPORT MonoBoolean
proflog_icall_GetCounterEvents (void)
{
	return ENABLED (PROFLOG_COUNTER_EVENTS);
}

ICALL_EXPORT void
proflog_icall_SetCounterEvents (MonoBoolean value)
{
	mono_coop_mutex_lock (&log_profiler.api_mutex);

	if (value)
		ENABLE (PROFLOG_COUNTER_EVENTS);
	else
		DISABLE (PROFLOG_COUNTER_EVENTS);

	mono_coop_mutex_unlock (&log_profiler.api_mutex);
}

ICALL_EXPORT MonoBoolean
proflog_icall_GetJitEvents (void)
{
	return ENABLED (PROFLOG_JIT_EVENTS);
}

ICALL_EXPORT void
proflog_icall_SetJitEvents (MonoBoolean value)
{
	mono_coop_mutex_lock (&log_profiler.api_mutex);

	if (value) {
		ENABLE (PROFLOG_JIT_EVENTS);
		mono_profiler_set_jit_code_buffer_callback (log_profiler.handle, code_buffer_new);
	} else {
		DISABLE (PROFLOG_JIT_EVENTS);
		mono_profiler_set_jit_code_buffer_callback (log_profiler.handle, NULL);
	}

	mono_coop_mutex_unlock (&log_profiler.api_mutex);
}
static void
runtime_initialized (MonoProfiler *profiler)
{
	mono_atomic_store_i32 (&log_profiler.runtime_inited, 1);

	register_counter ("Sample events allocated", &sample_allocations_ctr);
	register_counter ("Log buffers allocated", &buffer_allocations_ctr);

	register_counter ("Event: Sync points", &sync_points_ctr);
	register_counter ("Event: Heap objects", &heap_objects_ctr);
	register_counter ("Event: Heap starts", &heap_starts_ctr);
	register_counter ("Event: Heap ends", &heap_ends_ctr);
	register_counter ("Event: Heap roots", &heap_roots_ctr);
	register_counter ("Event: GC events", &gc_events_ctr);
	register_counter ("Event: GC resizes", &gc_resizes_ctr);
	register_counter ("Event: GC allocations", &gc_allocs_ctr);
	register_counter ("Event: GC moves", &gc_moves_ctr);
	register_counter ("Event: GC handle creations", &gc_handle_creations_ctr);
	register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr);
	register_counter ("Event: GC finalize starts", &finalize_begins_ctr);
	register_counter ("Event: GC finalize ends", &finalize_ends_ctr);
	register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr);
	register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr);
	register_counter ("Event: Image loads", &image_loads_ctr);
	register_counter ("Event: Image unloads", &image_unloads_ctr);
	register_counter ("Event: Assembly loads", &assembly_loads_ctr);
	register_counter ("Event: Assembly unloads", &assembly_unloads_ctr);
	register_counter ("Event: Class loads", &class_loads_ctr);
	register_counter ("Event: VTable loads", &vtable_loads_ctr);
	register_counter ("Event: Method entries", &method_entries_ctr);
	register_counter ("Event: Method exits", &method_exits_ctr);
	register_counter ("Event: Method exception leaves", &method_exception_exits_ctr);
	register_counter ("Event: Method JITs", &method_jits_ctr);
	register_counter ("Event: Code buffers", &code_buffers_ctr);
	register_counter ("Event: Exception throws", &exception_throws_ctr);
	register_counter ("Event: Exception clauses", &exception_clauses_ctr);
	register_counter ("Event: Monitor events", &monitor_events_ctr);
	register_counter ("Event: Thread starts", &thread_starts_ctr);
	register_counter ("Event: Thread ends", &thread_ends_ctr);
	register_counter ("Event: Thread names", &thread_names_ctr);
	register_counter ("Event: Domain loads", &domain_loads_ctr);
	register_counter ("Event: Domain unloads", &domain_unloads_ctr);
	register_counter ("Event: Domain names", &domain_names_ctr);
	register_counter ("Event: Context loads", &context_loads_ctr);
	register_counter ("Event: Context unloads", &context_unloads_ctr);
	register_counter ("Event: Sample binaries", &sample_ubins_ctr);
	register_counter ("Event: Sample symbols", &sample_usyms_ctr);
	register_counter ("Event: Sample hits", &sample_hits_ctr);
	register_counter ("Event: Counter descriptors", &counter_descriptors_ctr);
	register_counter ("Event: Counter samples", &counter_samples_ctr);
	register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr);
	register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr);
	register_counter ("Event: Coverage methods", &coverage_methods_ctr);
	register_counter ("Event: Coverage statements", &coverage_statements_ctr);
	register_counter ("Event: Coverage classes", &coverage_classes_ctr);
	register_counter ("Event: Coverage assemblies", &coverage_assemblies_ctr);

	counters_init ();

	/*
	 * We must start the helper thread before the writer thread. This is
	 * because the helper thread sets up the command port which is written to
	 * the log header by the writer thread.
	 */
	start_helper_thread ();
	start_writer_thread ();
	start_dumper_thread ();

	mono_coop_mutex_init (&log_profiler.api_mutex);

#define ADD_ICALL(NAME) \
	mono_add_internal_call ("Mono.Profiler.Log.LogProfiler::" EGLIB_STRINGIFY (NAME), proflog_icall_ ## NAME);
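
	/*
	 * Illustrative expansion: ADD_ICALL (GetMaxStackTraceFrames) becomes
	 *
	 *   mono_add_internal_call ("Mono.Profiler.Log.LogProfiler::GetMaxStackTraceFrames", proflog_icall_GetMaxStackTraceFrames);
	 *
	 * (EGLIB_STRINGIFY turns the name into a string literal, which the
	 * preprocessor then pastes onto the class name). In other words, each
	 * managed method on Mono.Profiler.Log.LogProfiler is bound to the
	 * matching proflog_icall_* function in this file.
	 */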

	ADD_ICALL (GetMaxStackTraceFrames);
	ADD_ICALL (GetStackTraceFrames);
	ADD_ICALL (SetStackTraceFrames);
	ADD_ICALL (GetHeapshotMode);
	ADD_ICALL (SetHeapshotMode);
	ADD_ICALL (GetHeapshotMillisecondsFrequency);
	ADD_ICALL (SetHeapshotMillisecondsFrequency);
	ADD_ICALL (GetHeapshotCollectionsFrequency);
	ADD_ICALL (SetHeapshotCollectionsFrequency);
	ADD_ICALL (TriggerHeapshot);
	ADD_ICALL (GetCallDepth);
	ADD_ICALL (SetCallDepth);
	ADD_ICALL (GetSampleMode);
	ADD_ICALL (SetSampleMode);
	ADD_ICALL (GetExceptionEvents);
	ADD_ICALL (SetExceptionEvents);
	ADD_ICALL (GetMonitorEvents);
	ADD_ICALL (SetMonitorEvents);
	ADD_ICALL (GetGCEvents);
	ADD_ICALL (SetGCEvents);
	ADD_ICALL (GetGCAllocationEvents);
	ADD_ICALL (SetGCAllocationEvents);
	ADD_ICALL (GetGCMoveEvents);
	ADD_ICALL (SetGCMoveEvents);
	ADD_ICALL (GetGCRootEvents);
	ADD_ICALL (SetGCRootEvents);
	ADD_ICALL (GetGCHandleEvents);
	ADD_ICALL (SetGCHandleEvents);
	ADD_ICALL (GetGCFinalizationEvents);
	ADD_ICALL (SetGCFinalizationEvents);
	ADD_ICALL (GetCounterEvents);
	ADD_ICALL (SetCounterEvents);
	ADD_ICALL (GetJitEvents);
	ADD_ICALL (SetJitEvents);

#undef ADD_ICALL
}

static void
create_profiler (const char *args, const char *filename, GPtrArray *filters)
{
	char *nf;

	log_profiler.args = pstrdup (args);
	log_profiler.command_port = log_config.command_port;

	// If the filename begins with '+', append the pid to the end.
	if (filename && *filename == '+')
		filename = g_strdup_printf ("%s.%d", filename + 1, getpid ());
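
	/*
	 * Illustrative example: a filename of "+app.mlpd" in a process with pid
	 * 1234 yields "app.mlpd.1234", so several processes can be profiled at
	 * once without their output files colliding.
	 */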

	if (!filename) {
		if (log_config.do_report)
			filename = "|mprof-report -";
		else
			filename = "output.mlpd";
		nf = (char*)filename;
	} else {
		nf = new_filename (filename);
		if (log_config.do_report) {
			int s = strlen (nf) + 32;
			char *p = (char *) g_malloc (s);
			snprintf (p, s, "|mprof-report '--out=%s' -", nf);
			g_free (nf);
			nf = p;
		}
	}

	if (*nf == '|') {
		log_profiler.file = popen (nf + 1, "w");
		log_profiler.pipe_output = 1;
	} else if (*nf == '#') {
		int fd = strtol (nf + 1, NULL, 10);
		log_profiler.file = fdopen (fd, "a");
	} else
		log_profiler.file = fopen (nf, "wb");

	if (!log_profiler.file) {
		mono_profiler_printf_err ("Could not create log profiler output file '%s': %s", nf, g_strerror (errno));
		exit (1);
	}
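
	/*
	 * To summarize the three output modes above: a name starting with '|' is
	 * treated as a command to pipe the log into (via popen), a name starting
	 * with '#' is parsed as an inherited file descriptor to append to, and
	 * anything else is opened as a regular file.
	 */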

#if defined (HAVE_SYS_ZLIB)
	if (log_config.use_zip)
		log_profiler.gzfile = gzdopen (fileno (log_profiler.file), "wb");
#endif

	// FIXME: We should free this stuff too.
	mono_lock_free_allocator_init_size_class (&log_profiler.sample_size_class, SAMPLE_SLOT_SIZE (log_config.num_frames), SAMPLE_BLOCK_SIZE);
	mono_lock_free_allocator_init_allocator (&log_profiler.sample_allocator, &log_profiler.sample_size_class, MONO_MEM_ACCOUNT_PROFILER);
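
	/*
	 * Roughly speaking: each slot in the sample size class is sized to hold
	 * one sample event with up to log_config.num_frames stack frames, and the
	 * lock-free allocator hands out such slots from SAMPLE_BLOCK_SIZE-byte
	 * blocks. The asserts in mono_profiler_init_log below check that these
	 * two quantities stay compatible.
	 */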

	mono_lock_free_queue_init (&log_profiler.sample_reuse_queue);

	// FIXME: We should free this stuff too.
	mono_lock_free_allocator_init_size_class (&log_profiler.writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
	mono_lock_free_allocator_init_allocator (&log_profiler.writer_entry_allocator, &log_profiler.writer_entry_size_class, MONO_MEM_ACCOUNT_PROFILER);

	mono_lock_free_queue_init (&log_profiler.writer_queue);
	mono_os_sem_init (&log_profiler.writer_queue_sem, 0);

	mono_lock_free_queue_init (&log_profiler.dumper_queue);
	mono_os_sem_init (&log_profiler.dumper_queue_sem, 0);

	mono_os_mutex_init (&log_profiler.method_table_mutex);
	log_profiler.method_table = mono_conc_hashtable_new (NULL, NULL);

	if (log_config.collect_coverage)
		coverage_init ();

	log_profiler.coverage_filters = filters;

	log_profiler.startup_time = current_time ();
}

MONO_API void
mono_profiler_init_log (const char *desc);

void
mono_profiler_init_log (const char *desc)
{
	/*
	 * If you hit this assert while increasing MAX_FRAMES, you need to increase
	 * SAMPLE_BLOCK_SIZE as well.
	 */
	g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));
	g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));

	GPtrArray *filters = NULL;

	proflog_parse_args (&log_config, desc [3] == ':' ? desc + 4 : "");
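
	/*
	 * The descriptor is expected to be either "log" or "log:<options>";
	 * desc [3] == ':' checks for the colon right after the "log" prefix, so
	 * only the option string reaches the parser. Illustrative example:
	 * "log:sample,heapshot" passes "sample,heapshot" to proflog_parse_args.
	 */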

	if (log_config.cov_filter_files) {
		filters = g_ptr_array_new ();
		int i;
		for (i = 0; i < log_config.cov_filter_files->len; ++i) {
			const char *name = log_config.cov_filter_files->pdata [i];
			parse_cov_filter_file (filters, name);
		}
	}

	MonoProfilerHandle handle = log_profiler.handle = mono_profiler_create (&log_profiler);

	if (log_config.enter_leave)
		mono_profiler_set_call_instrumentation_filter_callback (handle, method_filter);

	/*
	 * If the runtime was invoked for the purpose of AOT compilation only, the
	 * only thing we want to do is install the call instrumentation filter.
	 */
	if (mono_jit_aot_compiling ())
		return;

	create_profiler (desc, log_config.output_filename, filters);

	mono_lls_init (&log_profiler.profiler_thread_list, NULL);

	/*
	 * Required callbacks. These are either necessary for the profiler itself
	 * to function, or provide metadata that's needed if other events (e.g.
	 * allocations, exceptions) are dynamically enabled/disabled.
	 */

	mono_profiler_set_runtime_shutdown_begin_callback (handle, log_early_shutdown);
	mono_profiler_set_runtime_shutdown_end_callback (handle, log_shutdown);
	mono_profiler_set_runtime_initialized_callback (handle, runtime_initialized);

	mono_profiler_set_gc_event2_callback (handle, gc_event);

	mono_profiler_set_thread_started_callback (handle, thread_start);
	mono_profiler_set_thread_exited_callback (handle, thread_end);
	mono_profiler_set_thread_name_callback (handle, thread_name);

	mono_profiler_set_domain_loaded_callback (handle, domain_loaded);
	mono_profiler_set_domain_unloading_callback (handle, domain_unloaded);
	mono_profiler_set_domain_name_callback (handle, domain_name);

	mono_profiler_set_context_loaded_callback (handle, context_loaded);
	mono_profiler_set_context_unloaded_callback (handle, context_unloaded);

	mono_profiler_set_assembly_loaded_callback (handle, assembly_loaded);
	mono_profiler_set_assembly_unloading_callback (handle, assembly_unloaded);

	mono_profiler_set_image_loaded_callback (handle, image_loaded);
	mono_profiler_set_image_unloading_callback (handle, image_unloaded);

	mono_profiler_set_class_loaded_callback (handle, class_loaded);

	mono_profiler_set_vtable_loaded_callback (handle, vtable_loaded);

	mono_profiler_set_jit_done_callback (handle, method_jitted);

	mono_profiler_set_gc_root_register_callback (handle, gc_root_register);
	mono_profiler_set_gc_root_unregister_callback (handle, gc_root_deregister);

	if (ENABLED (PROFLOG_EXCEPTION_EVENTS)) {
		mono_profiler_set_exception_throw_callback (handle, throw_exc);
		mono_profiler_set_exception_clause_callback (handle, clause_exc);
	}

	if (ENABLED (PROFLOG_MONITOR_EVENTS)) {
		mono_profiler_set_monitor_contention_callback (handle, monitor_contention);
		mono_profiler_set_monitor_acquired_callback (handle, monitor_acquired);
		mono_profiler_set_monitor_failed_callback (handle, monitor_failed);
	}

	if (ENABLED (PROFLOG_GC_EVENTS))
		mono_profiler_set_gc_resize_callback (handle, gc_resize);

	if (ENABLED (PROFLOG_GC_ALLOCATION_EVENTS))
		mono_profiler_set_gc_allocation_callback (handle, gc_alloc);

	if (ENABLED (PROFLOG_GC_MOVE_EVENTS))
		mono_profiler_set_gc_moves_callback (handle, gc_moves);

	if (ENABLED (PROFLOG_GC_HANDLE_EVENTS)) {
		mono_profiler_set_gc_handle_created_callback (handle, gc_handle_created);
		mono_profiler_set_gc_handle_deleted_callback (handle, gc_handle_deleted);
	}

	if (ENABLED (PROFLOG_GC_FINALIZATION_EVENTS)) {
		mono_profiler_set_gc_finalizing_callback (handle, finalize_begin);
		mono_profiler_set_gc_finalized_callback (handle, finalize_end);
		mono_profiler_set_gc_finalizing_object_callback (handle, finalize_object_begin);
		mono_profiler_set_gc_finalized_object_callback (handle, finalize_object_end);
	} else if (ENABLED (PROFLOG_HEAPSHOT_EVENTS) && log_config.hs_mode_ondemand) {
		// On-demand heapshot uses the finalizer thread to force a collection, and thus a heapshot.
		mono_profiler_set_gc_finalized_callback (handle, finalize_end);
	}

	if (ENABLED (PROFLOG_SAMPLE_EVENTS))
		mono_profiler_set_sample_hit_callback (handle, mono_sample_hit);

	if (ENABLED (PROFLOG_JIT_EVENTS))
		mono_profiler_set_jit_code_buffer_callback (handle, code_buffer_new);

	if (log_config.enter_leave) {
		mono_profiler_set_method_enter_callback (handle, method_enter);
		mono_profiler_set_method_leave_callback (handle, method_leave);
		mono_profiler_set_method_tail_call_callback (handle, tail_call);
		mono_profiler_set_method_exception_leave_callback (handle, method_exc_leave);
	}

	if (log_config.collect_coverage) {
		mono_profiler_enable_coverage ();
		mono_profiler_set_coverage_filter_callback (handle, coverage_filter);
	}

	mono_profiler_enable_allocations ();
	mono_profiler_enable_sampling (handle);

	/*
	 * If no sample option was given by the user, this just leaves the sampling
	 * thread in idle mode. We do this even if no option was given so that we
	 * can warn if another profiler controls sampling parameters.
	 */
	if (!mono_profiler_set_sample_mode (handle, log_config.sampling_mode, log_config.sample_freq))
		mono_profiler_printf_err ("Another profiler controls sampling parameters; the log profiler will not be able to modify them.");
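
	/*
	 * Illustrative usage (not part of this file): the log profiler is
	 * typically activated from the command line, e.g.
	 *
	 *   mono --profile=log:sample app.exe
	 *
	 * which loads this module and calls mono_profiler_init_log ("log:sample");
	 * per the parsing above, "sample" is then handed to proflog_parse_args.
	 */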