[profiler] Fixed GCResizeEvent value overflow (#6335)
[mono-project.git] / mono / profiler / log.c
blob15db47c203c09fbbf4357cd09265fa6a3d7fee94
1 /*
2 * log.c: mono log profiler
4 * Authors:
5 * Paolo Molaro (lupus@ximian.com)
6 * Alex Rønne Petersen (alexrp@xamarin.com)
8 * Copyright 2010 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
13 #include <config.h>
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/class-internals.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/metadata/loader.h>
18 #include <mono/metadata/metadata-internals.h>
19 #include <mono/metadata/mono-config.h>
20 #include <mono/metadata/mono-gc.h>
21 #include <mono/metadata/mono-perfcounters.h>
22 #include <mono/metadata/tabledefs.h>
23 #include <mono/mini/jit.h>
24 #include <mono/utils/atomic.h>
25 #include <mono/utils/hazard-pointer.h>
26 #include <mono/utils/lock-free-alloc.h>
27 #include <mono/utils/lock-free-queue.h>
28 #include <mono/utils/mono-conc-hashtable.h>
29 #include <mono/utils/mono-coop-mutex.h>
30 #include <mono/utils/mono-counters.h>
31 #include <mono/utils/mono-logger-internals.h>
32 #include <mono/utils/mono-linked-list-set.h>
33 #include <mono/utils/mono-membar.h>
34 #include <mono/utils/mono-mmap.h>
35 #include <mono/utils/mono-os-mutex.h>
36 #include <mono/utils/mono-os-semaphore.h>
37 #include <mono/utils/mono-threads.h>
38 #include <mono/utils/mono-threads-api.h>
39 #include "log.h"
41 #ifdef HAVE_DLFCN_H
42 #include <dlfcn.h>
43 #endif
44 #include <fcntl.h>
45 #ifdef HAVE_LINK_H
46 #include <link.h>
47 #endif
48 #ifdef HAVE_UNISTD_H
49 #include <unistd.h>
50 #endif
51 #if defined(__APPLE__)
52 #include <mach/mach_time.h>
53 #endif
54 #include <netinet/in.h>
55 #ifdef HAVE_SYS_MMAN_H
56 #include <sys/mman.h>
57 #endif
58 #include <sys/socket.h>
59 #if defined (HAVE_SYS_ZLIB)
60 #include <zlib.h>
61 #endif
// Statistics for internal profiler data structures.
// Updated with mono_atomic_inc_i32 () from the allocation paths.
static gint32 sample_allocations_ctr,
	buffer_allocations_ctr;

// Statistics for profiler events. One counter per event kind that can be
// written to the log; each is incremented by the ENTER_LOG () invocation
// at the corresponding event's emission site.
static gint32 sync_points_ctr,
	heap_objects_ctr,
	heap_starts_ctr,
	heap_ends_ctr,
	heap_roots_ctr,
	gc_events_ctr,
	gc_resizes_ctr,
	gc_allocs_ctr,
	gc_moves_ctr,
	gc_handle_creations_ctr,
	gc_handle_deletions_ctr,
	finalize_begins_ctr,
	finalize_ends_ctr,
	finalize_object_begins_ctr,
	finalize_object_ends_ctr,
	image_loads_ctr,
	image_unloads_ctr,
	assembly_loads_ctr,
	assembly_unloads_ctr,
	class_loads_ctr,
	vtable_loads_ctr,
	method_entries_ctr,
	method_exits_ctr,
	method_exception_exits_ctr,
	method_jits_ctr,
	code_buffers_ctr,
	exception_throws_ctr,
	exception_clauses_ctr,
	monitor_events_ctr,
	thread_starts_ctr,
	thread_ends_ctr,
	thread_names_ctr,
	domain_loads_ctr,
	domain_unloads_ctr,
	domain_names_ctr,
	context_loads_ctr,
	context_unloads_ctr,
	sample_ubins_ctr,
	sample_usyms_ctr,
	sample_hits_ctr,
	counter_descriptors_ctr,
	counter_samples_ctr,
	perfcounter_descriptors_ctr,
	perfcounter_samples_ctr,
	coverage_methods_ctr,
	coverage_statements_ctr,
	coverage_classes_ctr,
	coverage_assemblies_ctr;
// Pending data to be written to the log, for a single thread.
// Threads periodically flush their own LogBuffers by calling safe_send
typedef struct _LogBuffer LogBuffer;
struct _LogBuffer {
	// Next (older) LogBuffer in processing queue
	LogBuffer *next;

	// Timestamps are emitted as deltas; time_base is the buffer creation
	// time and last_time tracks the most recently emitted timestamp
	// (see emit_time ()).
	uint64_t time_base;
	uint64_t last_time;

	// Pointer values are emitted relative to ptr_base, which is latched
	// from the first pointer written (see emit_ptr ()).
	gboolean has_ptr_base;
	uintptr_t ptr_base;

	// Method pointers are delta-encoded against the previously emitted
	// method (see emit_method_inner ()).
	uintptr_t method_base;
	uintptr_t last_method;

	// Object addresses are emitted relative to obj_base (see emit_obj ()).
	uintptr_t obj_base;

	// Owning thread's id; zeroed when the buffer is dumped threadless.
	uintptr_t thread_id;

	// Bytes allocated for this LogBuffer
	int size;

	// Start of currently unused space in buffer
	unsigned char* cursor;

	// Pointer to start-of-structure-plus-size (for convenience)
	unsigned char* buf_end;

	// Start of data in buffer. Contents follow "buffer format" described above.
	unsigned char buf [1];
};
// Per-thread profiler state, stored in TLS and (usually) in the lock-free
// profiler thread list.
typedef struct {
	MonoLinkedListSetNode node;

	// Was this thread added to the LLS?
	gboolean attached;

	// The current log buffer for this thread.
	LogBuffer *buffer;

	// Methods referenced by events in `buffer`, see `MethodInfo`.
	GPtrArray *methods;

	// Current call depth for enter/leave events.
	int call_depth;

	// Indicates whether this thread is currently writing to its `buffer`.
	gboolean busy;

	// Has this thread written a thread end event to `buffer`?
	gboolean ended;

	// Stored in `buffer_lock_state` to take the exclusive lock.
	int small_id;
} MonoProfilerThread;
// Default value in `profiler_tls` for new threads.
#define MONO_PROFILER_THREAD_ZERO ((MonoProfilerThread *) NULL)

// This is written to `profiler_tls` to indicate that a thread has stopped.
#define MONO_PROFILER_THREAD_DEAD ((MonoProfilerThread *) -1)

// Do not use these TLS macros directly unless you know what you're doing.

#ifdef HOST_WIN32

// Win32 TLS slot API.
#define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
#define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
#define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
#define PROF_TLS_FREE() (TlsFree (profiler_tls))

static DWORD profiler_tls;

#elif HAVE_KW_THREAD

// Fast path: the toolchain supports the __thread storage class directly.
#define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
#define PROF_TLS_GET() (profiler_tls)
#define PROF_TLS_INIT()
#define PROF_TLS_FREE()

static __thread MonoProfilerThread *profiler_tls;

#else

// Portable fallback: pthread-specific data.
#define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
#define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
#define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
#define PROF_TLS_FREE() (pthread_key_delete (profiler_tls))

static pthread_key_t profiler_tls;

#endif
// Returns the calling thread's native thread id as an integer.
static uintptr_t
thread_id (void)
{
	return (uintptr_t) mono_native_thread_id_get ();
}
// Returns the current process id, abstracting over the Win32/POSIX APIs.
static uintptr_t
process_id (void)
{
#ifdef HOST_WIN32
	return (uintptr_t) GetCurrentProcessId ();
#else
	return (uintptr_t) getpid ();
#endif
}
// Query/toggle event categories in the effective event mask.
#define ENABLED(EVT) (!!(log_config.effective_mask & (EVT)))
#define ENABLE(EVT) do { log_config.effective_mask |= (EVT); } while (0)
#define DISABLE(EVT) do { log_config.effective_mask &= ~(EVT); } while (0)
/*
 * These macros should be used when writing an event to a log buffer. They
 * take care of a bunch of stuff that can be repetitive and error-prone, such
 * as attaching the current thread, acquiring/releasing the buffer lock,
 * incrementing the event counter, expanding the log buffer, etc. They also
 * create a scope so that it's harder to leak the LogBuffer pointer, which can
 * be problematic as the pointer is unstable when the buffer lock isn't
 * acquired.
 *
 * If the calling thread is already attached, these macros will not alter its
 * attach mode (i.e. whether it's added to the LLS). If the thread is not
 * attached, init_thread () will be called with add_to_lls = TRUE.
 */

// Opens a scope (unbalanced do/{). COUNTER is the event statistics counter
// to bump; SIZE is the worst-case byte size of the event, used to make sure
// BUFFER has room for it. Must be paired with EXIT_LOG/EXIT_LOG_EXPLICIT ().
#define ENTER_LOG(COUNTER, BUFFER, SIZE) \
	do { \
		MonoProfilerThread *thread__ = get_thread (); \
		g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
		thread__->busy = TRUE; \
		mono_atomic_inc_i32 ((COUNTER)); \
		if (thread__->attached) \
			buffer_lock (); \
		LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))

// Closes the scope opened by ENTER_LOG (). If SEND, the thread's buffer
// chain is handed to the writer thread via send_log_unsafe ().
#define EXIT_LOG_EXPLICIT(SEND) \
		if ((SEND)) \
			send_log_unsafe (TRUE); \
		if (thread__->attached) \
			buffer_unlock (); \
		thread__->busy = FALSE; \
	} while (0)

// Pass these to EXIT_LOG_EXPLICIT () for easier reading.
#define DO_SEND TRUE
#define NO_SEND FALSE

#define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND)
// A loaded native binary (shared object/executable), kept as a singly
// linked list.
typedef struct _BinaryObject BinaryObject;
struct _BinaryObject {
	BinaryObject *next;
	// Base load address of the binary.
	void *addr;
	// Heap-allocated binary name.
	char *name;
};
// Tracks one runtime counter being sampled, as a singly linked list node.
typedef struct MonoCounterAgent {
	MonoCounter *counter;
	// MonoCounterAgent specific data :
	// Last sampled value and its size in bytes.
	void *value;
	size_t value_size;
	// Id used to reference this counter in the log stream.
	guint32 index;
	// Has the descriptor for this counter been written out yet?
	gboolean emitted;
	struct MonoCounterAgent *next;
} MonoCounterAgent;
// Tracks one performance counter being sampled, as a singly linked list node.
typedef struct _PerfCounterAgent PerfCounterAgent;
struct _PerfCounterAgent {
	PerfCounterAgent *next;
	// Id used to reference this counter in the log stream.
	guint32 index;
	char *category_name;
	char *name;
	// Last sampled value.
	gint64 value;
	// Has the descriptor for this counter been written out yet?
	gboolean emitted;
	// Did the value change since the last sample pass?
	gboolean updated;
	// Marked for removal on the next sweep.
	gboolean deleted;
};
// Global state for the log profiler. A single instance (log_profiler)
// exists for the lifetime of the process.
struct _MonoProfiler {
	MonoProfilerHandle handle;

	// Output destination: plain FILE or, when enabled, a gzip stream.
	FILE* file;
#if defined (HAVE_SYS_ZLIB)
	gzFile gzfile;
#endif

	// Command-line arguments passed to the profiler, written to the header.
	char *args;
	uint64_t startup_time;
	// Estimated cost of one current_time () call (see init_time ()).
	int timer_overhead;

#ifdef __APPLE__
	mach_timebase_info_data_t timebase_info;
#elif defined (HOST_WIN32)
	LARGE_INTEGER pcounter_freq;
#endif

	// Helper-thread command server state.
	int pipe_output;
	int command_port;
	int server_socket;
	int pipes [2];

	// All attached profiler threads; see MonoProfilerThread.
	MonoLinkedListSet profiler_thread_list;
	// Reader/writer spin lock state; see the comment above buffer_lock ().
	volatile gint32 buffer_lock_state;
	volatile gint32 buffer_lock_exclusive_intent;

	volatile gint32 runtime_inited;
	volatile gint32 in_shutdown;

	MonoNativeThreadId helper_thread;

	// Writer thread: drains writer_queue entries and dumps them to disk.
	MonoNativeThreadId writer_thread;
	volatile gint32 run_writer_thread;
	MonoLockFreeQueue writer_queue;
	MonoSemType writer_queue_sem;

	MonoLockFreeAllocSizeClass writer_entry_size_class;
	MonoLockFreeAllocator writer_entry_allocator;

	// Methods whose metadata has already been emitted.
	MonoConcurrentHashTable *method_table;
	mono_mutex_t method_table_mutex;

	// Dumper thread: processes sampling data.
	MonoNativeThreadId dumper_thread;
	volatile gint32 run_dumper_thread;
	MonoLockFreeQueue dumper_queue;
	MonoSemType dumper_queue_sem;

	MonoLockFreeAllocSizeClass sample_size_class;
	MonoLockFreeAllocator sample_allocator;
	MonoLockFreeQueue sample_reuse_queue;

	BinaryObject *binary_objects;

	// Heapshot bookkeeping.
	volatile gint32 heapshot_requested;
	guint64 gc_count;
	guint64 last_hs_time;
	gboolean do_heap_walk;

	// Runtime/performance counter sampling state.
	mono_mutex_t counters_mutex;
	MonoCounterAgent *counters;
	PerfCounterAgent *perfcounters;
	guint32 counters_index;

	// Code coverage collection state.
	mono_mutex_t coverage_mutex;
	GPtrArray *coverage_data;

	GPtrArray *coverage_filters;
	MonoConcurrentHashTable *coverage_filtered_classes;
	MonoConcurrentHashTable *coverage_suppressed_assemblies;

	MonoConcurrentHashTable *coverage_methods;
	MonoConcurrentHashTable *coverage_assemblies;
	MonoConcurrentHashTable *coverage_classes;

	MonoConcurrentHashTable *coverage_image_to_methods;

	guint32 coverage_previous_offset;
	guint32 coverage_method_id;

	MonoCoopMutex api_mutex;
};

static ProfilerConfig log_config;
static struct _MonoProfiler log_profiler;
// One unit of work for the writer thread: a buffer chain plus the method
// metadata referenced by its events. Ownership of both transfers with the
// entry (see send_buffer ()).
typedef struct {
	MonoLockFreeQueueNode node;
	GPtrArray *methods;
	LogBuffer *buffer;
} WriterQueueEntry;

#define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())

// Deferred method metadata: emitted by the writer thread for each method
// first referenced by an event (see register_method_local ()).
typedef struct {
	MonoMethod *method;
	MonoJitInfo *ji;
	uint64_t time;
} MethodInfo;

// Timestamps are expressed in nanoseconds.
#define TICKS_PER_SEC 1000000000LL
/*
 * current_time:
 * Returns a timestamp in nanoseconds (TICKS_PER_SEC units), using the
 * best clock available on the host platform.
 */
static uint64_t
current_time (void)
{
#ifdef __APPLE__
	uint64_t time = mach_absolute_time ();

	// Convert mach ticks to nanoseconds using the cached timebase info.
	time *= log_profiler.timebase_info.numer;
	time /= log_profiler.timebase_info.denom;

	return time;
#elif defined (HOST_WIN32)
	LARGE_INTEGER value;

	QueryPerformanceCounter (&value);

	return value.QuadPart * TICKS_PER_SEC / log_profiler.pcounter_freq.QuadPart;
#elif defined (CLOCK_MONOTONIC)
	struct timespec tspec;

	clock_gettime (CLOCK_MONOTONIC, &tspec);

	return ((uint64_t) tspec.tv_sec * TICKS_PER_SEC + tspec.tv_nsec);
#else
	// Last resort; note that gettimeofday () is not monotonic.
	struct timeval tv;

	gettimeofday (&tv, NULL);

	return ((uint64_t) tv.tv_sec * TICKS_PER_SEC + tv.tv_usec * 1000);
#endif
}
/*
 * init_time:
 * Initializes platform timer state and estimates the per-call overhead
 * of current_time () by averaging 256 back-to-back calls.
 */
static void
init_time (void)
{
#ifdef __APPLE__
	mach_timebase_info (&log_profiler.timebase_info);
#elif defined (HOST_WIN32)
	QueryPerformanceFrequency (&log_profiler.pcounter_freq);
#endif

	uint64_t time_start = current_time ();

	for (int i = 0; i < 256; ++i)
		current_time ();

	uint64_t time_end = current_time ();

	log_profiler.timer_overhead = (time_end - time_start) / 256;
}
/*
 * pstrdup:
 * Duplicates the NUL-terminated string @s into a freshly g_malloc'd
 * buffer, including the terminator. The caller owns the returned memory.
 */
static char*
pstrdup (const char *s)
{
	// Use size_t for the length: the previous `int` would truncate (and
	// invoke implementation-defined conversion) for strings >= 2 GiB.
	size_t len = strlen (s) + 1;
	char *p = (char *) g_malloc (len);
	memcpy (p, s, len);
	return p;
}
// Default LogBuffer allocation size; see create_buffer ().
#define BUFFER_SIZE (4096 * 16)

/* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
#define LEB128_SIZE 10

/* Size of a value encoded as a single byte. */
#undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
#define BYTE_SIZE 1

/* Size in bytes of the event prefix (ID + time). */
#define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
// Allocates page-backed memory for a LogBuffer (anonymous mapping, so
// presumably zero-filled — confirmed by callers relying on fresh buffers
// having a NULL `next`).
static void *
alloc_buffer (int size)
{
	return mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_ANON | MONO_MMAP_PRIVATE, MONO_MEM_ACCOUNT_PROFILER);
}

// Releases memory obtained from alloc_buffer (); size must match the
// original allocation.
static void
free_buffer (void *buf, int size)
{
	mono_vfree (buf, size, MONO_MEM_ACCOUNT_PROFILER);
}
480 static LogBuffer*
481 create_buffer (uintptr_t tid, int bytes)
483 LogBuffer* buf = (LogBuffer *) alloc_buffer (MAX (BUFFER_SIZE, bytes));
485 mono_atomic_inc_i32 (&buffer_allocations_ctr);
487 buf->size = BUFFER_SIZE;
488 buf->time_base = current_time ();
489 buf->last_time = buf->time_base;
490 buf->buf_end = (unsigned char *) buf + buf->size;
491 buf->cursor = buf->buf;
492 buf->thread_id = tid;
494 return buf;
/*
 * Must be called with the reader lock held if thread is the current thread, or
 * the exclusive lock if thread is a different thread. However, if thread is
 * the current thread, and init_thread () was called with add_to_lls = FALSE,
 * then no locking is necessary.
 */
static void
init_buffer_state (MonoProfilerThread *thread)
{
	// Give the thread a fresh, empty buffer and reset its pending method
	// list; the previous buffer/methods are assumed to have been handed
	// off already (see send_buffer ()).
	thread->buffer = create_buffer (thread->node.key, 0);
	thread->methods = NULL;
}
510 static void
511 clear_hazard_pointers (MonoThreadHazardPointers *hp)
513 mono_hazard_pointer_clear (hp, 0);
514 mono_hazard_pointer_clear (hp, 1);
515 mono_hazard_pointer_clear (hp, 2);
/*
 * init_thread:
 * Returns the calling thread's MonoProfilerThread, creating it on first
 * use. When add_to_lls is TRUE, the new thread is also inserted into the
 * profiler thread list so the main thread can clean it up at shutdown.
 */
static MonoProfilerThread *
init_thread (gboolean add_to_lls)
{
	MonoProfilerThread *thread = PROF_TLS_GET ();

	g_assert (thread != MONO_PROFILER_THREAD_DEAD && "Why are we trying to resurrect a stopped thread?");

	/*
	 * Sometimes we may try to initialize a thread twice. One example is the
	 * main thread: We initialize it when setting up the profiler, but we will
	 * also get a thread_start () callback for it. Another example is when
	 * attaching new threads to the runtime: We may get a gc_alloc () callback
	 * for that thread's thread object (where we initialize it), soon followed
	 * by a thread_start () callback.
	 *
	 * These cases are harmless anyhow. Just return if we've already done the
	 * initialization work.
	 */
	if (thread != MONO_PROFILER_THREAD_ZERO)
		return thread;

	thread = g_malloc (sizeof (MonoProfilerThread));
	thread->node.key = thread_id ();
	thread->attached = add_to_lls;
	thread->call_depth = 0;
	thread->busy = FALSE;
	thread->ended = FALSE;

	init_buffer_state (thread);

	thread->small_id = mono_thread_info_register_small_id ();

	/*
	 * Some internal profiler threads don't need to be cleaned up
	 * by the main thread on shutdown.
	 */
	if (add_to_lls) {
		MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
		g_assert (mono_lls_insert (&log_profiler.profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
		clear_hazard_pointers (hp);
	}

	PROF_TLS_SET (thread);

	return thread;
}
// Only valid if init_thread () was called with add_to_lls = FALSE.
// Frees the thread state immediately (no hazard pointers involved since
// the thread was never in the LLS) and marks the TLS slot dead.
static void
deinit_thread (MonoProfilerThread *thread)
{
	g_assert (!thread->attached && "Why are we manually freeing an attached thread?");

	g_free (thread);
	PROF_TLS_SET (MONO_PROFILER_THREAD_DEAD);
}
// Returns the calling thread's profiler state, attaching the thread
// (add_to_lls = TRUE) if it hasn't been initialized yet.
static MonoProfilerThread *
get_thread (void)
{
	return init_thread (TRUE);
}
// Only valid if init_thread () was called with add_to_lls = FALSE.
// Returns a buffer with at least `bytes` of free space, chaining a new
// buffer in front of the current one when the current one is too full.
static LogBuffer *
ensure_logbuf_unsafe (MonoProfilerThread *thread, int bytes)
{
	LogBuffer *old = thread->buffer;

	if (old->cursor + bytes < old->buf_end)
		return old;

	// Newest buffer goes to the head of the chain; older ones hang off
	// `next` until the chain is flushed.
	LogBuffer *new_ = create_buffer (thread->node.key, bytes);
	new_->next = old;
	thread->buffer = new_;

	return new_;
}
/*
 * This is a reader/writer spin lock of sorts used to protect log buffers.
 * When a thread modifies its own log buffer, it increments the reader
 * count. When a thread wants to access log buffers of other threads, it
 * takes the exclusive lock.
 *
 * `buffer_lock_state` holds the reader count in its lower 16 bits, and
 * the small ID of the thread currently holding the exclusive (writer)
 * lock in its upper 16 bits. Both can be zero. It's important that the
 * whole lock state is a single word that can be read/written atomically
 * to avoid race conditions where there could end up being readers while
 * the writer lock is held.
 *
 * The lock is writer-biased. When a thread wants to take the exclusive
 * lock, it increments `buffer_lock_exclusive_intent` which will make new
 * readers spin until it's back to zero, then takes the exclusive lock
 * once the reader count has reached zero. After releasing the exclusive
 * lock, it decrements `buffer_lock_exclusive_intent`, which, when it
 * reaches zero again, allows readers to increment the reader count.
 *
 * The writer bias is necessary because we take the exclusive lock in
 * `gc_event ()` during STW. If the writer bias was not there, and a
 * program had a large number of threads, STW-induced pauses could be
 * significantly longer than they have to be. Also, we emit periodic
 * sync points from the helper thread, which requires taking the
 * exclusive lock, and we need those to arrive with a reasonably
 * consistent frequency so that readers don't have to queue up too many
 * events between sync points.
 *
 * The lock does not support recursion.
 */

// Takes a reader reference on the buffer lock (CAS-increment of the
// reader count), yielding while an exclusive lock or exclusive intent
// is outstanding.
static void
buffer_lock (void)
{
	/*
	 * If the thread holding the exclusive lock tries to modify the
	 * reader count, just make it a no-op. This way, we also avoid
	 * invoking the GC safe point macros below, which could break if
	 * done from a thread that is currently the initiator of STW.
	 *
	 * In other words, we rely on the fact that the GC thread takes
	 * the exclusive lock in the gc_event () callback when the world
	 * is about to stop.
	 */
	if (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) != get_thread ()->small_id << 16) {
		MONO_ENTER_GC_SAFE;

		gint32 old, new_;

		do {
		restart:
			// Hold off if a thread wants to take the exclusive lock.
			while (mono_atomic_load_i32 (&log_profiler.buffer_lock_exclusive_intent))
				mono_thread_info_yield ();

			old = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);

			// Is a thread holding the exclusive lock?
			if (old >> 16) {
				mono_thread_info_yield ();
				goto restart;
			}

			new_ = old + 1;
		} while (mono_atomic_cas_i32 (&log_profiler.buffer_lock_state, new_, old) != old);

		MONO_EXIT_GC_SAFE;
	}

	mono_memory_barrier ();
}
// Drops the reader reference taken by buffer_lock (); a no-op when called
// by the thread that holds the exclusive lock.
static void
buffer_unlock (void)
{
	mono_memory_barrier ();

	gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);

	// See the comment in buffer_lock ().
	if (state == get_thread ()->small_id << 16)
		return;

	g_assert (state && "Why are we decrementing a zero reader count?");
	g_assert (!(state >> 16) && "Why is the exclusive lock held?");

	mono_atomic_dec_i32 (&log_profiler.buffer_lock_state);
}
// Takes the exclusive (writer) lock: signals intent so new readers back
// off, then spins until the reader count drops to zero and installs this
// thread's small id in the upper 16 bits of the lock state.
static void
buffer_lock_excl (void)
{
	gint32 new_ = get_thread ()->small_id << 16;

	g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) != new_ && "Why are we taking the exclusive lock twice?");

	mono_atomic_inc_i32 (&log_profiler.buffer_lock_exclusive_intent);

	MONO_ENTER_GC_SAFE;

	// CAS succeeds (returns 0) only when there are no readers and no
	// other exclusive holder.
	while (mono_atomic_cas_i32 (&log_profiler.buffer_lock_state, new_, 0))
		mono_thread_info_yield ();

	MONO_EXIT_GC_SAFE;

	mono_memory_barrier ();
}
// Releases the exclusive lock taken by buffer_lock_excl () and withdraws
// the exclusive intent, letting readers proceed again.
static void
buffer_unlock_excl (void)
{
	mono_memory_barrier ();

	gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);
	gint32 excl = state >> 16;

	g_assert (excl && "Why is the exclusive lock not held?");
	g_assert (excl == get_thread ()->small_id && "Why does another thread hold the exclusive lock?");
	g_assert (!(state & 0xFFFF) && "Why are there readers when the exclusive lock is held?");

	mono_atomic_store_i32 (&log_profiler.buffer_lock_state, 0);
	mono_atomic_dec_i32 (&log_profiler.buffer_lock_exclusive_intent);
}
/*
 * encode_uleb128:
 * Writes @value to @buf in unsigned LEB128 form (7 data bits per byte,
 * high bit set on all but the final byte). *endbuf is set to one past
 * the last byte written.
 */
static void
encode_uleb128 (uint64_t value, uint8_t *buf, uint8_t **endbuf)
{
	uint8_t *out = buf;

	for (;;) {
		uint8_t group = value & 0x7f;
		value >>= 7;

		if (value == 0) {
			*out++ = group;
			break;
		}

		// More groups follow; set the continuation bit.
		*out++ = group | 0x80;
	}

	*endbuf = out;
}
/*
 * encode_sleb128:
 * Writes @value to @buf in signed LEB128 form. *endbuf is set to one past
 * the last byte written.
 */
static void
encode_sleb128 (intptr_t value, uint8_t *buf, uint8_t **endbuf)
{
	const unsigned int bits = sizeof (intptr_t) * 8;
	const int is_negative = value < 0;
	uint8_t *out = buf;

	for (;;) {
		uint8_t group = value & 0x7f;
		value >>= 7;

		/*
		 * Force sign extension even if the implementation of >>=
		 * uses a logical rather than arithmetic shift for a signed
		 * left operand.
		 */
		if (is_negative)
			value |= - ((intptr_t) 1 << (bits - 7));

		/* Sign bit of a 7-bit group is the second high order bit (0x40). */
		if ((value == 0 && !(group & 0x40)) || (value == -1 && (group & 0x40))) {
			*out++ = group;
			break;
		}

		*out++ = group | 0x80;
	}

	*endbuf = out;
}
// Appends one raw byte to the buffer.
static void
emit_byte (LogBuffer *logbuffer, int value)
{
	logbuffer->cursor [0] = value;
	logbuffer->cursor++;

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

// Appends an unsigned LEB128-encoded integer to the buffer.
static void
emit_value (LogBuffer *logbuffer, int value)
{
	encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

// Appends a timestamp as an unsigned LEB128 delta against the previously
// emitted timestamp, then advances last_time.
static void
emit_time (LogBuffer *logbuffer, uint64_t value)
{
	uint64_t tdiff = value - logbuffer->last_time;
	encode_uleb128 (tdiff, logbuffer->cursor, &logbuffer->cursor);
	logbuffer->last_time = value;

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

// Appends the event ID byte followed by the given timestamp.
static void
emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
{
	emit_byte (logbuffer, event);
	emit_time (logbuffer, time);
}

// Appends the event ID byte stamped with the current time.
static void
emit_event (LogBuffer *logbuffer, int event)
{
	emit_event_time (logbuffer, event, current_time ());
}

// Appends a signed LEB128-encoded integer to the buffer.
static void
emit_svalue (LogBuffer *logbuffer, int64_t value)
{
	encode_sleb128 (value, logbuffer->cursor, &logbuffer->cursor);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

// Appends an unsigned LEB128-encoded 64-bit integer to the buffer.
static void
emit_uvalue (LogBuffer *logbuffer, uint64_t value)
{
	encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}
// Appends a pointer as a signed delta against the buffer's ptr_base,
// which is latched from the first pointer written to this buffer.
static void
emit_ptr (LogBuffer *logbuffer, const void *ptr)
{
	if (!logbuffer->has_ptr_base) {
		logbuffer->ptr_base = (uintptr_t) ptr;
		logbuffer->has_ptr_base = TRUE;
	}

	emit_svalue (logbuffer, (intptr_t) ptr - logbuffer->ptr_base);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

// Appends a method pointer delta-encoded against the previously emitted
// method pointer; method_base/last_method are latched on first use.
static void
emit_method_inner (LogBuffer *logbuffer, void *method)
{
	if (!logbuffer->method_base) {
		logbuffer->method_base = (intptr_t) method;
		logbuffer->last_method = (intptr_t) method;
	}

	encode_sleb128 ((intptr_t) ((char *) method - (char *) logbuffer->last_method), logbuffer->cursor, &logbuffer->cursor);
	logbuffer->last_method = (intptr_t) method;

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}
// The reader lock must be held.
// Queues @method for metadata emission (on the calling thread's pending
// list) unless its metadata has already been written (method_table).
static void
register_method_local (MonoMethod *method, MonoJitInfo *ji)
{
	MonoProfilerThread *thread = get_thread ();

	if (!mono_conc_hashtable_lookup (log_profiler.method_table, method)) {
		MethodInfo *info = (MethodInfo *) g_malloc (sizeof (MethodInfo));

		info->method = method;
		info->ji = ji;
		info->time = current_time ();

		// Lazily create the per-thread method list.
		GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());
		g_ptr_array_add (arr, info);
	}
}

// Emits a method reference, making sure the method's metadata gets
// written out alongside this buffer (see register_method_local ()).
static void
emit_method (LogBuffer *logbuffer, MonoMethod *method)
{
	register_method_local (method, NULL);
	emit_method_inner (logbuffer, method);
}
// Appends an object address, right-shifted by 3 (objects are at least
// 8-byte aligned, so the low bits carry no information), as a signed
// delta against obj_base, latched on first use.
static void
emit_obj (LogBuffer *logbuffer, void *ptr)
{
	if (!logbuffer->obj_base)
		logbuffer->obj_base = (uintptr_t) ptr >> 3;

	emit_svalue (logbuffer, ((uintptr_t) ptr >> 3) - logbuffer->obj_base);

	g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
}

// Appends at most @size characters of @str, always followed by a
// terminating NUL. A NULL @str emits just the NUL.
static void
emit_string (LogBuffer *logbuffer, const char *str, size_t size)
{
	size_t i = 0;
	if (str) {
		for (; i < size; i++) {
			if (str[i] == '\0')
				break;
			emit_byte (logbuffer, str [i]);
		}
	}
	emit_byte (logbuffer, '\0');
}
// Appends a double as its 8 raw bytes in little-endian order regardless
// of host endianness (big-endian hosts iterate the bytes in reverse).
static void
emit_double (LogBuffer *logbuffer, double value)
{
	int i;
	unsigned char buffer[8];
	memcpy (buffer, &value, 8);
#if G_BYTE_ORDER == G_BIG_ENDIAN
	for (i = 7; i >= 0; i--)
#else
	for (i = 0; i < 8; i++)
#endif
		emit_byte (logbuffer, buffer[i]);
}
// Stores the low 16 bits of @value at @buf in little-endian byte order;
// returns the position just past the written bytes.
static char*
write_int16 (char *buf, int32_t value)
{
	for (int i = 0; i < 2; ++i)
		buf [i] = (char) (value >> (8 * i));

	return buf + 2;
}
// Stores @value at @buf in little-endian byte order; returns the position
// just past the written bytes.
static char*
write_int32 (char *buf, int32_t value)
{
	for (int i = 0; i < 4; ++i)
		buf [i] = (char) (value >> (8 * i));

	return buf + 4;
}
// Stores @value at @buf in little-endian byte order; returns the position
// just past the written bytes.
static char*
write_int64 (char *buf, int64_t value)
{
	for (int i = 0; i < 8; ++i)
		buf [i] = (char) (value >> (8 * i));

	return buf + 8;
}
// Writes a length-prefixed, NUL-terminated string into the header buffer:
// a 32-bit little-endian length (including the NUL) followed by the bytes.
// Returns the position just past the string.
static char *
write_header_string (char *p, const char *str)
{
	size_t len = strlen (str) + 1;

	p = write_int32 (p, len);
	memcpy (p, str, len);

	return p + len;
}
// Writes the log file header (magic, versions, word size, startup time,
// timer overhead, process id, command port, and the args/arch/os strings)
// to the output file or gzip stream.
static void
dump_header (void)
{
	const char *args = log_profiler.args;
	const char *arch = mono_config_get_cpu ();
	const char *os = mono_config_get_os ();

	char *hbuf = g_malloc (
		sizeof (gint32) /* header id */ +
		sizeof (gint8) /* major version */ +
		sizeof (gint8) /* minor version */ +
		sizeof (gint8) /* data version */ +
		sizeof (gint8) /* word size */ +
		sizeof (gint64) /* startup time */ +
		sizeof (gint32) /* timer overhead */ +
		sizeof (gint32) /* flags */ +
		sizeof (gint32) /* process id */ +
		sizeof (gint16) /* command port */ +
		sizeof (gint32) + strlen (args) + 1 /* arguments */ +
		sizeof (gint32) + strlen (arch) + 1 /* architecture */ +
		sizeof (gint32) + strlen (os) + 1 /* operating system */
	);
	char *p = hbuf;

	p = write_int32 (p, LOG_HEADER_ID);
	*p++ = LOG_VERSION_MAJOR;
	*p++ = LOG_VERSION_MINOR;
	*p++ = LOG_DATA_VERSION;
	*p++ = sizeof (void *);
	// Wall-clock startup time, in milliseconds.
	p = write_int64 (p, ((uint64_t) time (NULL)) * 1000);
	p = write_int32 (p, log_profiler.timer_overhead);
	p = write_int32 (p, 0); /* flags */
	p = write_int32 (p, process_id ());
	p = write_int16 (p, log_profiler.command_port);
	p = write_header_string (p, args);
	p = write_header_string (p, arch);
	p = write_header_string (p, os);

#if defined (HAVE_SYS_ZLIB)
	if (log_profiler.gzfile) {
		gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
	} else
#endif
	{
		fwrite (hbuf, p - hbuf, 1, log_profiler.file);
		fflush (log_profiler.file);
	}

	g_free (hbuf);
}
/*
 * Must be called with the reader lock held if thread is the current thread, or
 * the exclusive lock if thread is a different thread. However, if thread is
 * the current thread, and init_thread () was called with add_to_lls = FALSE,
 * then no locking is necessary.
 */
static void
send_buffer (MonoProfilerThread *thread)
{
	// Hand the thread's buffer chain and pending method metadata to the
	// writer thread; ownership of both transfers with the queue entry.
	WriterQueueEntry *entry = mono_lock_free_alloc (&log_profiler.writer_entry_allocator);
	entry->methods = thread->methods;
	entry->buffer = thread->buffer;

	mono_lock_free_queue_node_init (&entry->node, FALSE);

	mono_lock_free_queue_enqueue (&log_profiler.writer_queue, &entry->node);
	mono_os_sem_post (&log_profiler.writer_queue_sem);
}
// Hazard-pointer release callback for a MonoProfilerThread: synthesizes a
// thread end event if one was never written, flushes the final buffer to
// the writer thread, and frees the thread state.
static void
free_thread (gpointer p)
{
	MonoProfilerThread *thread = p;

	if (!thread->ended) {
		/*
		 * The thread is being cleaned up by the main thread during
		 * shutdown. This typically happens for internal runtime
		 * threads. We need to synthesize a thread end event.
		 */

		mono_atomic_inc_i32 (&thread_ends_ctr);

		LogBuffer *buf = ensure_logbuf_unsafe (thread,
			EVENT_SIZE /* event */ +
			BYTE_SIZE /* type */ +
			LEB128_SIZE /* tid */
		);

		emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
		emit_byte (buf, TYPE_THREAD);
		emit_ptr (buf, (void *) thread->node.key);
	}

	send_buffer (thread);

	g_free (thread);
}
// Unlinks @thread from the profiler thread list and schedules it to be
// freed via free_thread () once no hazard pointers still reference it.
static void
remove_thread (MonoProfilerThread *thread)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();

	if (mono_lls_remove (&log_profiler.profiler_thread_list, hp, &thread->node))
		mono_thread_hazardous_try_free (thread, free_thread);

	clear_hazard_pointers (hp);
}
// Writes a chain of buffers to the output, oldest first (the recursion on
// buf->next emits older buffers before newer ones), then frees every
// buffer in the chain. Empty buffers are freed without emitting a header.
static void
dump_buffer (LogBuffer *buf)
{
	char hbuf [128];
	char *p = hbuf;

	if (buf->next)
		dump_buffer (buf->next);

	if (buf->cursor - buf->buf) {
		p = write_int32 (p, BUF_ID);
		p = write_int32 (p, buf->cursor - buf->buf);
		p = write_int64 (p, buf->time_base);
		p = write_int64 (p, buf->ptr_base);
		p = write_int64 (p, buf->obj_base);
		p = write_int64 (p, buf->thread_id);
		p = write_int64 (p, buf->method_base);

#if defined (HAVE_SYS_ZLIB)
		if (log_profiler.gzfile) {
			gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
			gzwrite (log_profiler.gzfile, buf->buf, buf->cursor - buf->buf);
		} else
#endif
		{
			fwrite (hbuf, p - hbuf, 1, log_profiler.file);
			fwrite (buf->buf, buf->cursor - buf->buf, 1, log_profiler.file);
			fflush (log_profiler.file);
		}
	}

	free_buffer (buf, buf->size);
}
1110 static void
1111 dump_buffer_threadless (LogBuffer *buf)
1113 for (LogBuffer *iter = buf; iter; iter = iter->next)
1114 iter->thread_id = 0;
1116 dump_buffer (buf);
1119 // Only valid if init_thread () was called with add_to_lls = FALSE.
1120 static void
1121 send_log_unsafe (gboolean if_needed)
1123 MonoProfilerThread *thread = get_thread ();
1125 if (!if_needed || (if_needed && thread->buffer->next)) {
1126 if (!thread->attached)
1127 for (LogBuffer *iter = thread->buffer; iter; iter = iter->next)
1128 iter->thread_id = 0;
1130 send_buffer (thread);
1131 init_buffer_state (thread);
// Assumes that the exclusive lock is held.
// Flushes every attached thread's buffer to the writer thread so all
// events written so far precede the upcoming sync point in the log.
static void
sync_point_flush (void)
{
	g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) == get_thread ()->small_id << 16 && "Why don't we hold the exclusive lock?");

	MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
		g_assert (thread->attached && "Why is a thread in the LLS not attached?");

		send_buffer (thread);
		init_buffer_state (thread);
	} MONO_LLS_FOREACH_SAFE_END
}

// Assumes that the exclusive lock is held.
// Emits the sync point event itself and sends it immediately.
static void
sync_point_mark (MonoProfilerSyncPointType type)
{
	g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) == get_thread ()->small_id << 16 && "Why don't we hold the exclusive lock?");

	ENTER_LOG (&sync_points_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */
	);

	emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
	emit_byte (logbuffer, type);

	EXIT_LOG_EXPLICIT (NO_SEND);

	send_log_unsafe (FALSE);
}

// Assumes that the exclusive lock is held.
// Full sync point: flush all threads, then mark the point in the log.
static void
sync_point (MonoProfilerSyncPointType type)
{
	sync_point_flush ();
	sync_point_mark (type);
}
1176 static int
1177 gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
1179 /* account for object alignment in the heap */
1180 size += 7;
1181 size &= ~7;
1183 ENTER_LOG (&heap_objects_ctr, logbuffer,
1184 EVENT_SIZE /* event */ +
1185 LEB128_SIZE /* obj */ +
1186 LEB128_SIZE /* vtable */ +
1187 LEB128_SIZE /* size */ +
1188 LEB128_SIZE /* num */ +
1189 num * (
1190 LEB128_SIZE /* offset */ +
1191 LEB128_SIZE /* ref */
1195 emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
1196 emit_obj (logbuffer, obj);
1197 emit_ptr (logbuffer, mono_object_get_vtable (obj));
1198 emit_value (logbuffer, size);
1199 emit_value (logbuffer, num);
1201 uintptr_t last_offset = 0;
1203 for (int i = 0; i < num; ++i) {
1204 emit_value (logbuffer, offsets [i] - last_offset);
1205 last_offset = offsets [i];
1206 emit_obj (logbuffer, refs [i]);
1209 EXIT_LOG;
1211 return 0;
1214 static void
1215 gc_roots (MonoProfiler *prof, uint64_t num, const mono_byte *const *addresses, MonoObject *const *objects)
1217 ENTER_LOG (&heap_roots_ctr, logbuffer,
1218 EVENT_SIZE /* event */ +
1219 LEB128_SIZE /* num */ +
1220 num * (
1221 LEB128_SIZE /* address */ +
1222 LEB128_SIZE /* object */
1226 emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
1227 emit_value (logbuffer, num);
1229 for (int i = 0; i < num; ++i) {
1230 emit_ptr (logbuffer, addresses [i]);
1231 emit_obj (logbuffer, objects [i]);
1234 EXIT_LOG;
/*
 * Emit a TYPE_HEAP_ROOT_REGISTER event for a newly registered root region
 * [start, start + size). The key identifies the owner; for domains and
 * contexts it is rewritten to the numeric id below. name may be NULL.
 */
static void
gc_root_register (MonoProfiler *prof, const mono_byte *start, size_t size, MonoGCRootSource source, const void *key, const char *name)
{
	// We don't write raw domain/context pointers in metadata events.
	switch (source) {
	case MONO_ROOT_SOURCE_DOMAIN:
		if (key)
			key = (void *)(uintptr_t) mono_domain_get_id ((MonoDomain *) key);
		break;
	case MONO_ROOT_SOURCE_CONTEXT_STATIC:
		key = (void *)(uintptr_t) mono_context_get_id ((MonoAppContext *) key);
		break;
	default:
		break;
	}

	// 0 when name is NULL; emit_string handles the NULL case.
	int name_len = name ? strlen (name) + 1 : 0;

	ENTER_LOG (&heap_roots_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* start */ +
		LEB128_SIZE /* size */ +
		BYTE_SIZE /* source */ +
		LEB128_SIZE /* key */ +
		name_len /* name */
	);

	emit_event (logbuffer, TYPE_HEAP_ROOT_REGISTER | TYPE_HEAP);
	emit_ptr (logbuffer, start);
	emit_uvalue (logbuffer, size);
	emit_byte (logbuffer, source);
	emit_ptr (logbuffer, key);
	emit_string (logbuffer, name, name_len);

	EXIT_LOG;
}
/* Emit a TYPE_HEAP_ROOT_UNREGISTER event for the root region at start. */
static void
gc_root_deregister (MonoProfiler *prof, const mono_byte *start)
{
	ENTER_LOG (&heap_roots_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* start */
	);

	emit_event (logbuffer, TYPE_HEAP_ROOT_UNREGISTER | TYPE_HEAP);
	emit_ptr (logbuffer, start);

	EXIT_LOG;
}
/*
 * Request a heapshot: set the flag and poke the finalizer thread.
 * The actual collection happens in process_heapshot ().
 */
static void
trigger_heapshot (void)
{
	// Rely on the finalization callback triggering a GC.
	mono_atomic_store_i32 (&log_profiler.heapshot_requested, 1);
	mono_gc_finalize_notify ();
}
/* If a heapshot was requested, force a full (max generation) GC now. */
static void
process_heapshot (void)
{
	if (mono_atomic_load_i32 (&log_profiler.heapshot_requested))
		mono_gc_collect (mono_gc_max_generation ());
}
/* Any of these option bits means GC-related events are being collected. */
#define ALL_GC_EVENTS_MASK (PROFLOG_GC_EVENTS | PROFLOG_GC_MOVE_EVENTS | PROFLOG_GC_ROOT_EVENTS)
/*
 * Central GC event callback. Besides logging the raw GC event, this drives
 * the heapshot state machine (do_heap_walk / heapshot_requested) and takes
 * the exclusive buffer lock around the stop-the-world window so buffers are
 * flushed at consistent points (see the comments on each phase below).
 */
static void
gc_event (MonoProfiler *profiler, MonoProfilerGCEvent ev, uint32_t generation, gboolean is_serial)
{
	gboolean is_major = generation == mono_gc_max_generation ();

	if (ENABLED (PROFLOG_GC_EVENTS)) {
		ENTER_LOG (&gc_events_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			BYTE_SIZE /* gc event */ +
			BYTE_SIZE /* generation */
		);

		emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
		emit_byte (logbuffer, ev);
		emit_byte (logbuffer, generation);

		EXIT_LOG;
	}

	switch (ev) {
	case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
		// Decide whether this collection should include a heap walk.
		switch (log_config.hs_mode) {
		case MONO_PROFILER_HEAPSHOT_NONE:
			log_profiler.do_heap_walk = FALSE;
			break;
		case MONO_PROFILER_HEAPSHOT_MAJOR:
			log_profiler.do_heap_walk = is_major;
			break;
		case MONO_PROFILER_HEAPSHOT_ON_DEMAND:
			// Handled below.
			break;
		case MONO_PROFILER_HEAPSHOT_X_GC:
			log_profiler.do_heap_walk = !(log_profiler.gc_count % log_config.hs_freq_gc);
			break;
		case MONO_PROFILER_HEAPSHOT_X_MS:
			log_profiler.do_heap_walk = (current_time () - log_profiler.last_hs_time) / 1000 * 1000 >= log_config.hs_freq_ms;
			break;
		default:
			g_assert_not_reached ();
		}

		/*
		 * heapshot_requested is set either because a heapshot was triggered
		 * manually (through the API or command server) or because we're doing
		 * a shutdown heapshot. Either way, a manually triggered heapshot
		 * overrides any decision we made in the switch above.
		 */
		if (is_major && is_serial && mono_atomic_load_i32 (&log_profiler.heapshot_requested)) {
			log_profiler.do_heap_walk = TRUE;
		} else if (log_profiler.do_heap_walk && (!is_major || !is_serial)) {
			/* Do a heap walk later, when we get invoked from the finalizer in serial mode */
			trigger_heapshot ();
			log_profiler.do_heap_walk = FALSE;
		}

		// Only hook the (expensive) roots callback when it will be used.
		if (ENABLED (PROFLOG_GC_ROOT_EVENTS) &&
		    (log_config.always_do_root_report || log_profiler.do_heap_walk))
			mono_profiler_set_gc_roots_callback (log_profiler.handle, gc_roots);

		/*
		 * Ensure that no thread can be in the middle of writing to
		 * a buffer when the world stops...
		 */
		buffer_lock_excl ();

		break;
	case MONO_GC_EVENT_POST_STOP_WORLD:
		/*
		 * ... So that we now have a consistent view of all buffers.
		 * This allows us to flush them. We need to do this because
		 * they may contain object allocation events that need to be
		 * committed to the log file before any object move events
		 * that will be produced during this GC.
		 */
		if (ENABLED (ALL_GC_EVENTS_MASK))
			sync_point (SYNC_POINT_WORLD_STOP);

		// Surround heapshots with HEAP_START/HEAP_END events.
		if (log_profiler.do_heap_walk) {
			ENTER_LOG (&heap_starts_ctr, logbuffer,
				EVENT_SIZE /* event */
			);

			emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);

			EXIT_LOG;
		}

		break;
	case MONO_GC_EVENT_START:
		if (is_major)
			log_profiler.gc_count++;

		break;
	case MONO_GC_EVENT_PRE_START_WORLD:
		mono_profiler_set_gc_roots_callback (log_profiler.handle, NULL);

		if (log_profiler.do_heap_walk) {
			// Heap walks only happen in serial major collections.
			g_assert (is_major && is_serial);

			mono_gc_walk_heap (0, gc_reference, NULL);

			ENTER_LOG (&heap_ends_ctr, logbuffer,
				EVENT_SIZE /* event */
			);

			emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);

			EXIT_LOG;

			log_profiler.do_heap_walk = FALSE;
			log_profiler.last_hs_time = current_time ();

			mono_atomic_store_i32 (&log_profiler.heapshot_requested, 0);
		}

		/*
		 * Similarly, we must now make sure that any object moves
		 * written to the GC thread's buffer are flushed. Otherwise,
		 * object allocation events for certain addresses could come
		 * after the move events that made those addresses available.
		 */
		if (ENABLED (ALL_GC_EVENTS_MASK))
			sync_point_mark (SYNC_POINT_WORLD_START);

		break;
	case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
		/*
		 * Finally, it is safe to allow other threads to write to
		 * their buffers again.
		 */
		buffer_unlock_excl ();

		break;
	default:
		break;
	}
}
/*
 * Emit a TYPE_GC_RESIZE event with the new heap size. new_size is emitted
 * with emit_uvalue (unsigned LEB128) since it is an unsigned uintptr_t.
 */
static void
gc_resize (MonoProfiler *profiler, uintptr_t new_size)
{
	ENTER_LOG (&gc_resizes_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* new size */
	);

	emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
	emit_uvalue (logbuffer, new_size);

	EXIT_LOG;
}
/* A captured managed backtrace, filled by walk_stack ()/collect_bt (). */
typedef struct {
	int count; // number of valid entries in the arrays below
	MonoMethod* methods [MAX_FRAMES];
	int32_t il_offsets [MAX_FRAMES];
	int32_t native_offsets [MAX_FRAMES];
} FrameData;
1464 static mono_bool
1465 walk_stack (MonoMethod *method, int32_t native_offset, int32_t il_offset, mono_bool managed, void* data)
1467 FrameData *frame = (FrameData *)data;
1468 if (method && frame->count < log_config.num_frames) {
1469 frame->il_offsets [frame->count] = il_offset;
1470 frame->native_offsets [frame->count] = native_offset;
1471 frame->methods [frame->count++] = method;
1473 return frame->count == log_config.num_frames;
/*
 * a note about stack walks: they can cause more profiler events to fire,
 * so we need to make sure they don't happen after we started emitting an
 * event, hence the collect_bt/emit_bt split.
 */
/* Capture the current managed backtrace into data (before ENTER_LOG). */
static void
collect_bt (FrameData *data)
{
	data->count = 0;
	mono_stack_walk_no_il (walk_stack, data);
}
1488 static void
1489 emit_bt (LogBuffer *logbuffer, FrameData *data)
1491 emit_value (logbuffer, data->count);
1493 while (data->count)
1494 emit_method (logbuffer, data->methods [--data->count]);
/*
 * Emit a TYPE_ALLOC event for a newly allocated object, with an optional
 * backtrace (TYPE_ALLOC_BT) when enter/leave instrumentation is off and
 * backtraces are enabled. The object size is rounded up to 8 bytes.
 */
static void
gc_alloc (MonoProfiler *prof, MonoObject *obj)
{
	int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_ALLOC_BT : 0;
	FrameData data;
	uintptr_t len = mono_object_get_size (obj);
	/* account for object alignment in the heap */
	len += 7;
	len &= ~7;

	// Walk the stack before ENTER_LOG; walks can fire more events.
	if (do_bt)
		collect_bt (&data);

	ENTER_LOG (&gc_allocs_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* vtable */ +
		LEB128_SIZE /* obj */ +
		LEB128_SIZE /* size */ +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	emit_event (logbuffer, do_bt | TYPE_ALLOC);
	emit_ptr (logbuffer, mono_object_get_vtable (obj));
	emit_obj (logbuffer, obj);
	emit_value (logbuffer, len);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}
1534 static void
1535 gc_moves (MonoProfiler *prof, MonoObject *const *objects, uint64_t num)
1537 ENTER_LOG (&gc_moves_ctr, logbuffer,
1538 EVENT_SIZE /* event */ +
1539 LEB128_SIZE /* num */ +
1540 num * (
1541 LEB128_SIZE /* object */
1545 emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
1546 emit_value (logbuffer, num);
1548 for (int i = 0; i < num; ++i)
1549 emit_obj (logbuffer, objects [i]);
1551 EXIT_LOG;
/*
 * Emit a GC handle creation/destruction event (with optional backtrace).
 * op must be MONO_PROFILER_GC_HANDLE_CREATED or _DESTROYED; obj is only
 * emitted for creations.
 */
static void
gc_handle (MonoProfiler *prof, int op, MonoGCHandleType type, uint32_t handle, MonoObject *obj)
{
	int do_bt = !log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames;
	FrameData data;

	// Walk the stack before ENTER_LOG; walks can fire more events.
	if (do_bt)
		collect_bt (&data);

	gint32 *ctr = op == MONO_PROFILER_GC_HANDLE_CREATED ? &gc_handle_creations_ctr : &gc_handle_deletions_ctr;

	ENTER_LOG (ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* type */ +
		LEB128_SIZE /* handle */ +
		(op == MONO_PROFILER_GC_HANDLE_CREATED ? (
			LEB128_SIZE /* obj */
		) : 0) +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	if (op == MONO_PROFILER_GC_HANDLE_CREATED)
		emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
	else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
		emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
	else
		g_assert_not_reached ();

	emit_value (logbuffer, type);
	emit_value (logbuffer, handle);

	if (op == MONO_PROFILER_GC_HANDLE_CREATED)
		emit_obj (logbuffer, obj);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}
/* Profiler API adapter: forward handle creation to gc_handle (). */
static void
gc_handle_created (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type, MonoObject *obj)
{
	gc_handle (prof, MONO_PROFILER_GC_HANDLE_CREATED, type, handle, obj);
}
/* Profiler API adapter: forward handle destruction to gc_handle (). */
static void
gc_handle_deleted (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type)
{
	gc_handle (prof, MONO_PROFILER_GC_HANDLE_DESTROYED, type, handle, NULL);
}
/* Emit a TYPE_GC_FINALIZE_START event when a finalization pass begins. */
static void
finalize_begin (MonoProfiler *prof)
{
	ENTER_LOG (&finalize_begins_ctr, buf,
		EVENT_SIZE /* event */
	);

	emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);

	EXIT_LOG;
}
/*
 * End of a finalization pass. Always runs any pending heapshot (the
 * trigger_heapshot () path relies on this), but only logs the event when
 * finalization events are enabled.
 */
static void
finalize_end (MonoProfiler *prof)
{
	process_heapshot ();

	if (ENABLED (PROFLOG_GC_FINALIZATION_EVENTS)) {
		ENTER_LOG (&finalize_ends_ctr, buf,
			EVENT_SIZE /* event */
		);

		emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);

		EXIT_LOG;
	}
}
/* Emit a TYPE_GC_FINALIZE_OBJECT_START event for obj. */
static void
finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
{
	ENTER_LOG (&finalize_object_begins_ctr, buf,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* obj */
	);

	emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
	emit_obj (buf, obj);

	EXIT_LOG;
}
/* Emit a TYPE_GC_FINALIZE_OBJECT_END event for obj. */
static void
finalize_object_end (MonoProfiler *prof, MonoObject *obj)
{
	ENTER_LOG (&finalize_object_ends_ctr, buf,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* obj */
	);

	emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
	emit_obj (buf, obj);

	EXIT_LOG;
}
/*
 * Append klass's nested-type-qualified name ("Outer/Inner", with namespace
 * on the outermost type) to the buffer at p. Returns the new end of string.
 * NOTE(review): writes are unbounded — callers must supply a buffer large
 * enough for the full name (type_name () uses 1024 bytes); confirm that is
 * sufficient for pathological generated names.
 */
static char*
push_nesting (char *p, MonoClass *klass)
{
	MonoClass *nesting;
	const char *name;
	const char *nspace;
	nesting = mono_class_get_nesting_type (klass);
	if (nesting) {
		// Recurse on the enclosing type first, then separate with '/'.
		p = push_nesting (p, nesting);
		*p++ = '/';
		*p = 0;
	}
	name = mono_class_get_name (klass);
	nspace = mono_class_get_namespace (klass);
	if (*nspace) {
		strcpy (p, nspace);
		p += strlen (nspace);
		*p++ = '.';
		*p = 0;
	}
	strcpy (p, name);
	p += strlen (name);
	return p;
}
1692 static char*
1693 type_name (MonoClass *klass)
1695 char buf [1024];
1696 char *p;
1697 push_nesting (buf, klass);
1698 p = (char *) g_malloc (strlen (buf) + 1);
1699 strcpy (p, buf);
1700 return p;
/* Emit an image-load metadata event carrying the image's file name. */
static void
image_loaded (MonoProfiler *prof, MonoImage *image)
{
	const char *name = mono_image_get_filename (image);
	int nlen = strlen (name) + 1; // include the NUL terminator

	ENTER_LOG (&image_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_IMAGE);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;
}
/* Emit an image-unload metadata event carrying the image's file name. */
static void
image_unloaded (MonoProfiler *prof, MonoImage *image)
{
	const char *name = mono_image_get_filename (image);
	int nlen = strlen (name) + 1; // include the NUL terminator

	ENTER_LOG (&image_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_IMAGE);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;
}
/* Emit an assembly-load metadata event with the stringified assembly name. */
static void
assembly_loaded (MonoProfiler *prof, MonoAssembly *assembly)
{
	// mono_stringify_assembly_name allocates; freed with mono_free below.
	char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
	int nlen = strlen (name) + 1;
	MonoImage *image = mono_assembly_get_image (assembly);

	ENTER_LOG (&assembly_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* assembly */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_ASSEMBLY);
	emit_ptr (logbuffer, assembly);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;

	mono_free (name);
}
/* Emit an assembly-unload metadata event with the stringified assembly name. */
static void
assembly_unloaded (MonoProfiler *prof, MonoAssembly *assembly)
{
	// mono_stringify_assembly_name allocates; freed with mono_free below.
	char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
	int nlen = strlen (name) + 1;
	MonoImage *image = mono_assembly_get_image (assembly);

	ENTER_LOG (&assembly_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* assembly */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_ASSEMBLY);
	emit_ptr (logbuffer, assembly);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;

	mono_free (name);
}
/*
 * Emit a class-load metadata event. Before the runtime is initialized,
 * mono_type_get_name () is unavailable, so the local type_name () fallback
 * is used; the two paths allocate with different allocators, hence the
 * matching mono_free ()/g_free () split at the end.
 */
static void
class_loaded (MonoProfiler *prof, MonoClass *klass)
{
	char *name;

	if (mono_atomic_load_i32 (&log_profiler.runtime_inited))
		name = mono_type_get_name (mono_class_get_type (klass));
	else
		name = type_name (klass);

	int nlen = strlen (name) + 1;
	MonoImage *image = mono_class_get_image (klass);

	ENTER_LOG (&class_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* klass */ +
		LEB128_SIZE /* image */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_CLASS);
	emit_ptr (logbuffer, klass);
	emit_ptr (logbuffer, image);
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;

	// Free with the allocator that produced the name (see above).
	if (mono_atomic_load_i32 (&log_profiler.runtime_inited))
		mono_free (name);
	else
		g_free (name);
}
/*
 * Emit a vtable-load metadata event tying the vtable to its domain id and
 * class. The domain id (not the raw pointer) is written, defaulting to 0
 * when the vtable has no domain.
 */
static void
vtable_loaded (MonoProfiler *prof, MonoVTable *vtable)
{
	MonoClass *klass = mono_vtable_class (vtable);
	MonoDomain *domain = mono_vtable_domain (vtable);
	uint32_t domain_id = domain ? mono_domain_get_id (domain) : 0;

	ENTER_LOG (&vtable_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* vtable */ +
		LEB128_SIZE /* domain id */ +
		LEB128_SIZE /* klass */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_VTABLE);
	emit_ptr (logbuffer, vtable);
	emit_ptr (logbuffer, (void *)(uintptr_t) domain_id);
	emit_ptr (logbuffer, klass);

	EXIT_LOG;
}
/*
 * Instrumented method prologue: log a TYPE_ENTER event unless the thread's
 * call depth already exceeds the configured maximum (depth is incremented
 * regardless so method_leave stays balanced).
 */
static void
method_enter (MonoProfiler *prof, MonoMethod *method, MonoProfilerCallContext *ctx)
{
	if (get_thread ()->call_depth++ <= log_config.max_call_depth) {
		ENTER_LOG (&method_entries_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method */
		);

		emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
		emit_method (logbuffer, method);

		EXIT_LOG;
	}
}
/*
 * Instrumented method epilogue: decrement the call depth and log a
 * TYPE_LEAVE event when back within the configured maximum depth.
 */
static void
method_leave (MonoProfiler *prof, MonoMethod *method, MonoProfilerCallContext *ctx)
{
	if (--get_thread ()->call_depth <= log_config.max_call_depth) {
		ENTER_LOG (&method_exits_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method */
		);

		emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
		emit_method (logbuffer, method);

		EXIT_LOG;
	}
}
/* A tail call leaves the caller; log it as a regular method leave. */
static void
tail_call (MonoProfiler *prof, MonoMethod *method, MonoMethod *target)
{
	method_leave (prof, method, NULL);
}
/*
 * A method is being exited by exception unwinding: decrement the call
 * depth and log a TYPE_EXC_LEAVE event when within the maximum depth.
 */
static void
method_exc_leave (MonoProfiler *prof, MonoMethod *method, MonoObject *exc)
{
	if (--get_thread ()->call_depth <= log_config.max_call_depth) {
		ENTER_LOG (&method_exception_exits_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method */
		);

		emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
		emit_method (logbuffer, method);

		EXIT_LOG;
	}
}
1915 static MonoProfilerCallInstrumentationFlags
1916 method_filter (MonoProfiler *prof, MonoMethod *method)
1918 if (log_config.callspec.len > 0 &&
1919 !mono_callspec_eval (method, &log_config.callspec))
1920 return MONO_PROFILER_CALL_INSTRUMENTATION_NONE;
1922 return MONO_PROFILER_CALL_INSTRUMENTATION_ENTER |
1923 MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE |
1924 MONO_PROFILER_CALL_INSTRUMENTATION_TAIL_CALL |
1925 MONO_PROFILER_CALL_INSTRUMENTATION_EXCEPTION_LEAVE;
/*
 * A method finished JIT compilation: record it locally under the shared
 * buffer lock so concurrent flushes see a consistent state.
 */
static void
method_jitted (MonoProfiler *prof, MonoMethod *method, MonoJitInfo *ji)
{
	buffer_lock ();

	register_method_local (method, ji);

	buffer_unlock ();
}
/*
 * Emit a TYPE_JITHELPER event for a newly created native code buffer.
 * Only specific-trampoline buffers carry a name (data is the name string
 * in that case).
 */
static void
code_buffer_new (MonoProfiler *prof, const mono_byte *buffer, uint64_t size, MonoProfilerCodeBufferType type, const void *data)
{
	const char *name;
	int nlen;

	if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
		name = (const char *) data;
		nlen = strlen (name) + 1;
	} else {
		name = NULL;
		nlen = 0;
	}

	ENTER_LOG (&code_buffers_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* buffer */ +
		LEB128_SIZE /* size */ +
		(name ? (
			nlen /* name */
		) : 0)
	);

	emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
	emit_byte (logbuffer, type);
	emit_ptr (logbuffer, buffer);
	emit_value (logbuffer, size);

	if (name) {
		memcpy (logbuffer->cursor, name, nlen);
		logbuffer->cursor += nlen;
	}

	EXIT_LOG;
}
/*
 * Emit a TYPE_EXCEPTION event for a thrown exception object, with an
 * optional backtrace (TYPE_THROW_BT) under the same conditions as
 * gc_alloc ().
 */
static void
throw_exc (MonoProfiler *prof, MonoObject *object)
{
	int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_THROW_BT : 0;
	FrameData data;

	// Walk the stack before ENTER_LOG; walks can fire more events.
	if (do_bt)
		collect_bt (&data);

	ENTER_LOG (&exception_throws_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* object */ +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
	emit_obj (logbuffer, object);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}
/* Emit a TYPE_CLAUSE event when an exception clause (catch/filter/finally
 * per clause_type) in method handles exc. */
static void
clause_exc (MonoProfiler *prof, MonoMethod *method, uint32_t clause_num, MonoExceptionEnum clause_type, MonoObject *exc)
{
	ENTER_LOG (&exception_clauses_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* clause type */ +
		LEB128_SIZE /* clause num */ +
		LEB128_SIZE /* method */ +
		LEB128_SIZE /* exc */
	);

	emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
	emit_byte (logbuffer, clause_type);
	emit_value (logbuffer, clause_num);
	emit_method (logbuffer, method);
	emit_obj (logbuffer, exc);

	EXIT_LOG;
}
/*
 * Emit a TYPE_MONITOR event (contention/acquired/failed per ev) for a
 * monitor on object, with an optional backtrace (TYPE_MONITOR_BT).
 */
static void
monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
{
	int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_MONITOR_BT : 0;
	FrameData data;

	// Walk the stack before ENTER_LOG; walks can fire more events.
	if (do_bt)
		collect_bt (&data);

	ENTER_LOG (&monitor_events_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* ev */ +
		LEB128_SIZE /* object */ +
		(do_bt ? (
			LEB128_SIZE /* count */ +
			data.count * (
				LEB128_SIZE /* method */
			)
		) : 0)
	);

	emit_event (logbuffer, do_bt | TYPE_MONITOR);
	emit_byte (logbuffer, ev);
	emit_obj (logbuffer, object);

	if (do_bt)
		emit_bt (logbuffer, &data);

	EXIT_LOG;
}
/* Monitor contention started: forward to monitor_event (). */
static void
monitor_contention (MonoProfiler *prof, MonoObject *object)
{
	monitor_event (prof, object, MONO_PROFILER_MONITOR_CONTENTION);
}
/* Contended monitor acquired: forward to monitor_event (). */
static void
monitor_acquired (MonoProfiler *prof, MonoObject *object)
{
	monitor_event (prof, object, MONO_PROFILER_MONITOR_DONE);
}
/* Monitor acquisition failed: forward to monitor_event (). */
static void
monitor_failed (MonoProfiler *prof, MonoObject *object)
{
	monitor_event (prof, object, MONO_PROFILER_MONITOR_FAIL);
}
/* Emit a thread-start metadata event carrying the native thread id. */
static void
thread_start (MonoProfiler *prof, uintptr_t tid)
{
	ENTER_LOG (&thread_starts_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* tid */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_THREAD);
	emit_ptr (logbuffer, (void*) tid);

	EXIT_LOG;
}
/*
 * Emit a thread-end metadata event, then tear down this thread's profiler
 * state: NO_SEND keeps the buffer for remove_thread () to flush, and the
 * TLS slot is marked dead so late events on this thread are ignored.
 */
static void
thread_end (MonoProfiler *prof, uintptr_t tid)
{
	ENTER_LOG (&thread_ends_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* tid */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_THREAD);
	emit_ptr (logbuffer, (void*) tid);

	EXIT_LOG_EXPLICIT (NO_SEND);

	MonoProfilerThread *thread = get_thread ();

	thread->ended = TRUE;
	remove_thread (thread);

	PROF_TLS_SET (MONO_PROFILER_THREAD_DEAD);
}
/* Emit a thread-name metadata event (tid plus NUL-terminated name). */
static void
thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
{
	int len = strlen (name) + 1; // include the NUL terminator

	ENTER_LOG (&thread_names_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* tid */ +
		len /* name */
	);

	emit_event (logbuffer, TYPE_METADATA);
	emit_byte (logbuffer, TYPE_THREAD);
	emit_ptr (logbuffer, (void*)tid);
	memcpy (logbuffer->cursor, name, len);
	logbuffer->cursor += len;

	EXIT_LOG;
}
/* Emit a domain-load metadata event keyed by the numeric domain id. */
static void
domain_loaded (MonoProfiler *prof, MonoDomain *domain)
{
	ENTER_LOG (&domain_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_DOMAIN);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));

	EXIT_LOG;
}
/* Emit a domain-unload metadata event keyed by the numeric domain id. */
static void
domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
{
	ENTER_LOG (&domain_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_DOMAIN);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));

	EXIT_LOG;
}
/* Emit a domain-name metadata event (domain id plus NUL-terminated name). */
static void
domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
{
	int nlen = strlen (name) + 1; // include the NUL terminator

	ENTER_LOG (&domain_names_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* domain id */ +
		nlen /* name */
	);

	emit_event (logbuffer, TYPE_METADATA);
	emit_byte (logbuffer, TYPE_DOMAIN);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
	memcpy (logbuffer->cursor, name, nlen);
	logbuffer->cursor += nlen;

	EXIT_LOG;
}
/* Emit a context-load metadata event with the context and owning domain ids. */
static void
context_loaded (MonoProfiler *prof, MonoAppContext *context)
{
	ENTER_LOG (&context_loads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* context id */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_CONTEXT);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));

	EXIT_LOG;
}
/* Emit a context-unload metadata event with the context and owning domain ids. */
static void
context_unloaded (MonoProfiler *prof, MonoAppContext *context)
{
	ENTER_LOG (&context_unloads_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* type */ +
		LEB128_SIZE /* context id */ +
		LEB128_SIZE /* domain id */
	);

	emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
	emit_byte (logbuffer, TYPE_CONTEXT);
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
	emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));

	EXIT_LOG;
}
/* One stack frame captured by the async-signal-safe sampling walk. */
typedef struct {
	MonoMethod *method;
	MonoDomain *domain;
	void *base_address;
	int offset;
} AsyncFrameInfo;
/*
 * One sampling-profiler hit, queued lock-free from the signal handler to
 * the dumper thread. frames is a trailing variable-length array sized by
 * SAMPLE_SLOT_SIZE ().
 */
typedef struct {
	MonoLockFreeQueueNode node;
	uint64_t time;
	uintptr_t tid;
	const void *ip;
	int count; // valid entries in frames
	AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
} SampleHit;
2238 static mono_bool
2239 async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
2241 SampleHit *sample = (SampleHit *) data;
2243 if (sample->count < log_config.num_frames) {
2244 int i = sample->count;
2246 sample->frames [i].method = method;
2247 sample->frames [i].domain = domain;
2248 sample->frames [i].base_address = base_address;
2249 sample->frames [i].offset = offset;
2251 sample->count++;
2254 return sample->count == log_config.num_frames;
/* Bytes needed for a SampleHit holding FRAMES frames (trailing array). */
#define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
/* Sample slots are carved out of page-sized blocks. */
#define SAMPLE_BLOCK_SIZE (mono_pagesize ())
/*
 * Hazard-pointer free callback: instead of freeing, push the sample onto
 * the dumper queue and wake the dumper thread.
 */
static void
enqueue_sample_hit (gpointer p)
{
	SampleHit *sample = p;

	mono_lock_free_queue_node_unpoison (&sample->node);
	mono_lock_free_queue_enqueue (&log_profiler.dumper_queue, &sample->node);
	mono_os_sem_post (&log_profiler.dumper_queue_sem);
}
/*
 * Sampling signal handler: grab (or allocate, up to the configured cap) a
 * SampleHit, fill it with a stack walk plus time/tid/ip, and hand it to
 * the dumper via the hazard-pointer machinery. Must stay async-signal-safe.
 */
static void
mono_sample_hit (MonoProfiler *profiler, const mono_byte *ip, const void *context)
{
	/*
	 * Please note: We rely on the runtime loading the profiler with
	 * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
	 * this function (and its siblings) are resolved when the profiler is
	 * loaded. Otherwise, we would potentially invoke the dynamic linker when
	 * invoking runtime functions, which is not async-signal-safe.
	 */

	if (mono_atomic_load_i32 (&log_profiler.in_shutdown))
		return;

	SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);

	if (!sample) {
		/*
		 * If we're out of reusable sample events and we're not allowed to
		 * allocate more, we have no choice but to drop the event.
		 */
		if (mono_atomic_load_i32 (&sample_allocations_ctr) >= log_config.max_allocated_sample_hits)
			return;

		sample = mono_lock_free_alloc (&profiler->sample_allocator);
		mono_lock_free_queue_node_init (&sample->node, TRUE);

		mono_atomic_inc_i32 (&sample_allocations_ctr);
	}

	sample->count = 0;
	mono_stack_walk_async_safe (&async_walk_stack, (void *) context, sample);

	sample->time = current_time ();
	sample->tid = thread_id ();
	sample->ip = ip;

	mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
}
/* Open-addressed hash set of code pages seen by the statistical sampler. */
static uintptr_t *code_pages = 0;
static int num_code_pages = 0;  // occupied slots
static int size_code_pages = 0; // table capacity (kept at least 2x occupancy)
#define CPAGE_SHIFT (9)
#define CPAGE_SIZE (1 << CPAGE_SHIFT) // 512-byte "pages"
#define CPAGE_MASK (~(CPAGE_SIZE - 1))
#define CPAGE_ADDR(p) ((p) & CPAGE_MASK)
2318 static uintptr_t
2319 add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
2321 uintptr_t i;
2322 uintptr_t start_pos;
2323 start_pos = (page >> CPAGE_SHIFT) % hsize;
2324 i = start_pos;
2325 do {
2326 if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
2327 return 0;
2328 } else if (!hash [i]) {
2329 hash [i] = page;
2330 return 1;
2332 /* wrap around */
2333 if (++i == hsize)
2334 i = 0;
2335 } while (i != start_pos);
2336 g_assert_not_reached ();
2337 return 0;
/*
 * Record the code page containing ip. Grows (doubles, starting at 16) and
 * rehashes the table whenever occupancy would reach half the capacity.
 */
static void
add_code_pointer (uintptr_t ip)
{
	uintptr_t i;
	if (num_code_pages * 2 >= size_code_pages) {
		uintptr_t *n;
		uintptr_t old_size = size_code_pages;
		size_code_pages *= 2;
		if (size_code_pages == 0)
			size_code_pages = 16;
		n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
		// Rehash all existing entries into the new table.
		for (i = 0; i < old_size; ++i) {
			if (code_pages [i])
				add_code_page (n, size_code_pages, code_pages [i]);
		}
		if (code_pages)
			g_free (code_pages);
		code_pages = n;
	}
	num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
}
/* ELF code crashes on some systems. */
//#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
#if 0
/*
 * (Disabled.) Emit a TYPE_SAMPLE_UBIN event describing an unmanaged
 * binary mapped at load_addr.
 */
static void
dump_ubin (const char *filename, uintptr_t load_addr, uint64_t offset, uintptr_t size)
{
	int len = strlen (filename) + 1; // include the NUL terminator

	ENTER_LOG (&sample_ubins_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* load address */ +
		LEB128_SIZE /* offset */ +
		LEB128_SIZE /* size */ +
		len /* file name */
	);

	emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_UBIN);
	emit_ptr (logbuffer, load_addr);
	emit_uvalue (logbuffer, offset);
	emit_uvalue (logbuffer, size);
	memcpy (logbuffer->cursor, filename, len);
	logbuffer->cursor += len;

	EXIT_LOG;
}
#endif
/*
 * Emit a TYPE_SAMPLE_USYM event mapping [value, value + size) to an
 * unmanaged symbol name.
 * NOTE(review): size (uintptr_t, unsigned) is written with emit_value
 * (signed LEB128) while the disabled dump_ubin () uses emit_uvalue for its
 * size field — verify against the log reader; this looks like the same
 * signed/unsigned encoding mismatch the GCResizeEvent fix addressed.
 */
static void
dump_usym (const char *name, uintptr_t value, uintptr_t size)
{
	int len = strlen (name) + 1; // include the NUL terminator

	ENTER_LOG (&sample_usyms_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* value */ +
		LEB128_SIZE /* size */ +
		len /* name */
	);

	emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
	emit_ptr (logbuffer, (void*)value);
	emit_value (logbuffer, size);
	memcpy (logbuffer->cursor, name, len);
	logbuffer->cursor += len;

	EXIT_LOG;
}
/* ELF code crashes on some systems. */
//#if defined(ELFMAG0)
#if 0

/* Select the ElfW() word size matching the host pointer size. */
#if SIZEOF_VOID_P == 4
#define ELF_WSIZE 32
#else
#define ELF_WSIZE 64
#endif
#ifndef ElfW
#define ElfW(type) _ElfW (Elf, ELF_WSIZE, type)
#define _ElfW(e,w,t) _ElfW_1 (e, w, _##t)
#define _ElfW_1(e,w,t) e##w##t
#endif

/*
 * Walk an ELF symbol table and emit a usym event for every named,
 * non-empty STT_FUNC entry, rebased to `load_addr`.
 * NOTE: currently compiled out (see the #if 0 above).
 */
static void
dump_elf_symbols (ElfW(Sym) *symbols, int num_symbols, const char *strtab, void *load_addr)
{
	int i;
	for (i = 0; i < num_symbols; ++i) {
		const char* sym;
		sym = strtab + symbols [i].st_name;
		/* Skip unnamed, zero-sized and non-function symbols. */
		if (!symbols [i].st_name || !symbols [i].st_size || (symbols [i].st_info & 0xf) != STT_FUNC)
			continue;
		dump_usym (sym, (uintptr_t)load_addr + symbols [i].st_value, symbols [i].st_size);
	}
}

/*
 * mmap an ELF file, locate its SHT_SYMTAB section and dump its function
 * symbols. Returns 1 on success, 0 on any failure.
 * NOTE: currently compiled out (see the #if 0 above).
 */
static int
read_elf_symbols (MonoProfiler *prof, const char *filename, void *load_addr)
{
	int fd, i;
	void *data;
	struct stat statb;
	uint64_t file_size;
	ElfW(Ehdr) *header;
	ElfW(Shdr) *sheader;
	ElfW(Shdr) *shstrtabh;
	ElfW(Shdr) *symtabh = NULL;
	ElfW(Shdr) *strtabh = NULL;
	ElfW(Sym) *symbols = NULL;
	const char *strtab;
	int num_symbols;

	fd = open (filename, O_RDONLY);
	if (fd < 0)
		return 0;
	if (fstat (fd, &statb) != 0) {
		close (fd);
		return 0;
	}
	file_size = statb.st_size;
	data = mmap (NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close (fd);
	if (data == MAP_FAILED)
		return 0;
	header = data;
	/* Verify the ELF magic before trusting any header fields. */
	if (header->e_ident [EI_MAG0] != ELFMAG0 ||
			header->e_ident [EI_MAG1] != ELFMAG1 ||
			header->e_ident [EI_MAG2] != ELFMAG2 ||
			header->e_ident [EI_MAG3] != ELFMAG3 ) {
		munmap (data, file_size);
		return 0;
	}
	sheader = (void*)((char*)data + header->e_shoff);
	shstrtabh = (void*)((char*)sheader + (header->e_shentsize * header->e_shstrndx));
	/* NOTE(review): this assignment is dead — strtab is unconditionally
	 * reassigned from strtabh below before any use; verify before removing. */
	strtab = (const char*)data + shstrtabh->sh_offset;
	/* Find the symbol table section and its associated string table. */
	for (i = 0; i < header->e_shnum; ++i) {
		if (sheader->sh_type == SHT_SYMTAB) {
			symtabh = sheader;
			strtabh = (void*)((char*)data + header->e_shoff + sheader->sh_link * header->e_shentsize);
			break;
		}
		sheader = (void*)((char*)sheader + header->e_shentsize);
	}
	if (!symtabh || !strtabh) {
		munmap (data, file_size);
		return 0;
	}
	strtab = (const char*)data + strtabh->sh_offset;
	num_symbols = symtabh->sh_size / symtabh->sh_entsize;
	symbols = (void*)((char*)data + symtabh->sh_offset);
	dump_elf_symbols (symbols, num_symbols, strtab, load_addr);
	munmap (data, file_size);
	return 1;
}
#endif
/* ELF code crashes on some systems. */
//#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
#if 0
/*
 * dl_iterate_phdr callback: register each loaded shared object once in
 * log_profiler.binary_objects, emit a ubin event for it, and dump its
 * dynamic symbols (via the ELF file, or via DT_SYMTAB/DT_HASH in memory).
 * NOTE: currently compiled out (see the #if 0 above).
 */
static int
elf_dl_callback (struct dl_phdr_info *info, size_t size, void *data)
{
	char buf [256];
	const char *filename;
	BinaryObject *obj;
	char *a = (void*)info->dlpi_addr;
	int i, num_sym;
	ElfW(Dyn) *dyn = NULL;
	ElfW(Sym) *symtab = NULL;
	ElfW(Word) *hash_table = NULL;
	ElfW(Ehdr) *header = NULL;
	const char* strtab = NULL;
	/* Already seen this load address? Nothing to do. */
	for (obj = log_profiler.binary_objects; obj; obj = obj->next) {
		if (obj->addr == a)
			return 0;
	}
	filename = info->dlpi_name;
	if (!filename)
		return 0;
	/* The main executable reports an empty name; resolve it via /proc. */
	if (!info->dlpi_addr && !filename [0]) {
		int l = readlink ("/proc/self/exe", buf, sizeof (buf) - 1);
		if (l > 0) {
			buf [l] = 0;
			filename = buf;
		}
	}
	obj = g_calloc (sizeof (BinaryObject), 1);
	obj->addr = (void*)info->dlpi_addr;
	obj->name = pstrdup (filename);
	obj->next = log_profiler.binary_objects;
	log_profiler.binary_objects = obj;
	a = NULL;
	for (i = 0; i < info->dlpi_phnum; ++i) {
		if (info->dlpi_phdr[i].p_type == PT_LOAD && !header) {
			header = (ElfW(Ehdr)*)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
			if (header->e_ident [EI_MAG0] != ELFMAG0 ||
					header->e_ident [EI_MAG1] != ELFMAG1 ||
					header->e_ident [EI_MAG2] != ELFMAG2 ||
					header->e_ident [EI_MAG3] != ELFMAG3 ) {
				header = NULL;
			}
			dump_ubin (filename, info->dlpi_addr + info->dlpi_phdr[i].p_vaddr, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
		} else if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) {
			dyn = (ElfW(Dyn) *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
		}
	}
	/* NOTE(review): read_elf_symbols () is declared above with a leading
	 * MonoProfiler * parameter but is called here with only two arguments;
	 * this dead (#if 0) code would not compile as-is — verify if revived. */
	if (read_elf_symbols (filename, (void*)info->dlpi_addr))
		return 0;
	if (!info->dlpi_name || !info->dlpi_name[0])
		return 0;
	if (!dyn)
		return 0;
	for (i = 0; dyn [i].d_tag != DT_NULL; ++i) {
		if (dyn [i].d_tag == DT_SYMTAB) {
			symtab = (ElfW(Sym) *)(a + dyn [i].d_un.d_ptr);
		} else if (dyn [i].d_tag == DT_HASH) {
			hash_table = (ElfW(Word) *)(a + dyn [i].d_un.d_ptr);
		} else if (dyn [i].d_tag == DT_STRTAB) {
			strtab = (const char*)(a + dyn [i].d_un.d_ptr);
		}
	}
	if (!hash_table)
		return 0;
	/* DT_HASH layout: [0] = nbucket, [1] = nchain == number of symbols. */
	num_sym = hash_table [1];
	dump_elf_symbols (symtab, num_sym, strtab, (void*)info->dlpi_addr);
	return 0;
}

static int
load_binaries (void)
{
	dl_iterate_phdr (elf_dl_callback, NULL);
	return 1;
}
#else
/* Fallback when the ELF walker is disabled: report "nothing loaded". */
static int
load_binaries (void)
{
	return 0;
}
#endif
/*
 * Best-effort reverse lookup of the symbol name containing `code`.
 * Returns NULL when no symbol information is available.
 */
static const char*
symbol_for (uintptr_t code)
{
#ifdef HAVE_DLADDR
	Dl_info info;

	if (dladdr ((void *) code, &info) && info.dli_sname)
		return info.dli_sname;

	/* (A backtrace_symbols ()-based fallback used to live here,
	 * commented out.) */
#endif
	return NULL;
}
/*
 * Emit usym events for every not-yet-dumped code page recorded by
 * add_code_pointer ().  Bit 0 of a code_pages entry marks a page whose
 * symbols were already dumped.
 */
static void
dump_unmanaged_coderefs (void)
{
	/* NOTE(review): `i` is int while size_code_pages is uintptr_t, a
	 * signed/unsigned comparison — harmless for realistic table sizes,
	 * but verify before relying on very large tables. */
	int i;
	const char* last_symbol;
	uintptr_t addr, page_end;

	if (load_binaries ())
		return;
	for (i = 0; i < size_code_pages; ++i) {
		const char* sym;
		/* Skip empty slots and pages already dumped (tag bit set). */
		if (!code_pages [i] || code_pages [i] & 1)
			continue;
		last_symbol = NULL;
		addr = CPAGE_ADDR (code_pages [i]);
		page_end = addr + CPAGE_SIZE;
		/* Mark the page as dumped so we never emit it twice. */
		code_pages [i] |= 1;
		/* we dump the symbols for the whole page */
		for (; addr < page_end; addr += 16) {
			sym = symbol_for (addr);
			/* Collapse runs of addresses resolving to the same symbol. */
			if (sym && sym == last_symbol)
				continue;
			last_symbol = sym;
			if (!sym)
				continue;
			dump_usym (sym, addr, 0); /* let's not guess the size */
		}
	}
}
/*
 * Start (or restart) tracking `counter`: counters already known get their
 * cached value reset so the next sample re-emits them; new counters get a
 * fresh agent appended to the list in registration order.
 */
static void
counters_add_agent (MonoCounter *counter)
{
	/* Once shutdown has begun, stop tracking new counters. */
	if (mono_atomic_load_i32 (&log_profiler.in_shutdown))
		return;

	MonoCounterAgent *agent, *item;

	mono_os_mutex_lock (&log_profiler.counters_mutex);

	/* Re-registration: drop the cached value so it is emitted again. */
	for (agent = log_profiler.counters; agent; agent = agent->next) {
		if (agent->counter == counter) {
			agent->value_size = 0;
			if (agent->value) {
				g_free (agent->value);
				agent->value = NULL;
			}
			goto done;
		}
	}

	agent = (MonoCounterAgent *) g_malloc (sizeof (MonoCounterAgent));
	agent->counter = counter;
	agent->value = NULL;
	agent->value_size = 0;
	agent->index = log_profiler.counters_index++;
	agent->emitted = FALSE;
	agent->next = NULL;

	/* Append at the tail so agents keep registration order. */
	if (!log_profiler.counters) {
		log_profiler.counters = agent;
	} else {
		item = log_profiler.counters;
		while (item->next)
			item = item->next;
		item->next = agent;
	}

done:
	mono_os_mutex_unlock (&log_profiler.counters_mutex);
}
2679 static mono_bool
2680 counters_init_foreach_callback (MonoCounter *counter, gpointer data)
2682 counters_add_agent (counter);
2683 return TRUE;
/*
 * Set up counter tracking: hook new-counter registration and register
 * agents for every counter that already exists.
 */
static void
counters_init (void)
{
	mono_os_mutex_init (&log_profiler.counters_mutex);

	/* Counter indexes start at 1. */
	log_profiler.counters_index = 1;

	mono_counters_on_register (&counters_add_agent);
	mono_counters_foreach (counters_init_foreach_callback, NULL);
}
/*
 * Emit a counter-descriptors event for every agent not yet described in
 * the log.  Two passes: first compute the exact buffer size, then emit;
 * the two loops must stay in sync.
 */
static void
counters_emit (void)
{
	MonoCounterAgent *agent;
	int len = 0;
	int size =
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* len */
	;

	mono_os_mutex_lock (&log_profiler.counters_mutex);

	/* Pass 1: size the event and count the pending descriptors. */
	for (agent = log_profiler.counters; agent; agent = agent->next) {
		if (agent->emitted)
			continue;

		size +=
			LEB128_SIZE /* section */ +
			strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
			BYTE_SIZE /* type */ +
			BYTE_SIZE /* unit */ +
			BYTE_SIZE /* variance */ +
			LEB128_SIZE /* index */
		;

		len++;
	}

	if (!len)
		goto done;

	ENTER_LOG (&counter_descriptors_ctr, logbuffer, size);

	emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
	emit_value (logbuffer, len);

	/* Pass 2: emit each pending descriptor and mark it as described. */
	for (agent = log_profiler.counters; agent; agent = agent->next) {
		const char *name;

		if (agent->emitted)
			continue;

		name = mono_counter_get_name (agent->counter);
		emit_value (logbuffer, mono_counter_get_section (agent->counter));
		emit_string (logbuffer, name, strlen (name) + 1);
		emit_value (logbuffer, mono_counter_get_type (agent->counter));
		emit_value (logbuffer, mono_counter_get_unit (agent->counter));
		emit_value (logbuffer, mono_counter_get_variance (agent->counter));
		emit_value (logbuffer, agent->index);

		agent->emitted = TRUE;
	}

	EXIT_LOG;

done:
	mono_os_mutex_unlock (&log_profiler.counters_mutex);
}
/*
 * Sample every tracked counter and emit one TYPE_SAMPLE_COUNTERS event.
 * Numeric counters are delta-encoded against the previously sampled value
 * cached in the agent; unchanged values are skipped entirely.
 */
static void
counters_sample (uint64_t timestamp)
{
	MonoCounterAgent *agent;
	MonoCounter *counter;
	int type;
	int buffer_size;
	void *buffer;
	int size;

	/* Make sure descriptors exist for all counters before sampling them. */
	counters_emit ();

	buffer_size = 8;
	buffer = g_calloc (1, buffer_size);

	mono_os_mutex_lock (&log_profiler.counters_mutex);

	/* Compute the upper bound on the event size for all agents. */
	size =
		EVENT_SIZE /* event */
	;

	for (agent = log_profiler.counters; agent; agent = agent->next) {
		/*
		 * FIXME: This calculation is incorrect for string counters since
		 * mono_counter_get_size () just returns 0 in that case. We should
		 * address this if we ever actually add any string counters to Mono.
		 */
		size +=
			LEB128_SIZE /* index */ +
			BYTE_SIZE /* type */ +
			mono_counter_get_size (agent->counter) /* value */
		;
	}

	size +=
		LEB128_SIZE /* stop marker */
	;

	ENTER_LOG (&counter_samples_ctr, logbuffer, size);

	emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);

	for (agent = log_profiler.counters; agent; agent = agent->next) {
		/* NOTE: this `size` intentionally shadows the outer int `size`
		 * used only for the ENTER_LOG reservation above. */
		size_t size;

		counter = agent->counter;

		size = mono_counter_get_size (counter);

		/* Grow the scratch buffer on demand; it is reused across agents. */
		if (size > buffer_size) {
			buffer_size = size;
			buffer = g_realloc (buffer, buffer_size);
		}

		memset (buffer, 0, buffer_size);

		/* NOTE: the sampling side effect lives inside the assert. */
		g_assert (mono_counters_sample (counter, buffer, size));

		type = mono_counter_get_type (counter);

		if (!agent->value) {
			/* First sample: allocate the cache; the value below is
			 * emitted as a delta against this zeroed cache. */
			agent->value = g_calloc (1, size);
			agent->value_size = size;
		} else {
			/* Skip counters whose value did not change. */
			if (type == MONO_COUNTER_STRING) {
				if (strcmp (agent->value, buffer) == 0)
					continue;
			} else {
				if (agent->value_size == size && memcmp (agent->value, buffer, size) == 0)
					continue;
			}
		}

		emit_uvalue (logbuffer, agent->index);
		emit_value (logbuffer, type);
		switch (type) {
		case MONO_COUNTER_INT:
#if SIZEOF_VOID_P == 4
		case MONO_COUNTER_WORD:
#endif
			emit_svalue (logbuffer, *(int*)buffer - *(int*)agent->value);
			break;
		case MONO_COUNTER_UINT:
			emit_uvalue (logbuffer, *(guint*)buffer - *(guint*)agent->value);
			break;
		case MONO_COUNTER_TIME_INTERVAL:
		case MONO_COUNTER_LONG:
#if SIZEOF_VOID_P == 8
		case MONO_COUNTER_WORD:
#endif
			emit_svalue (logbuffer, *(gint64*)buffer - *(gint64*)agent->value);
			break;
		case MONO_COUNTER_ULONG:
			emit_uvalue (logbuffer, *(guint64*)buffer - *(guint64*)agent->value);
			break;
		case MONO_COUNTER_DOUBLE:
			/* Doubles are emitted as absolute values, not deltas. */
			emit_double (logbuffer, *(double*)buffer);
			break;
		case MONO_COUNTER_STRING:
			if (size == 0) {
				emit_byte (logbuffer, 0);
			} else {
				emit_byte (logbuffer, 1);
				emit_string (logbuffer, (char*)buffer, size);
			}
			break;
		default:
			g_assert_not_reached ();
		}

		/* Grow the per-agent cache for longer strings. */
		if (type == MONO_COUNTER_STRING && size > agent->value_size) {
			agent->value = g_realloc (agent->value, size);
			agent->value_size = size;
		}

		/* Remember this sample for the next delta computation. */
		if (size > 0)
			memcpy (agent->value, buffer, size);
	}
	g_free (buffer);

	/* Stop marker. */
	emit_value (logbuffer, 0);

	EXIT_LOG;

	mono_os_mutex_unlock (&log_profiler.counters_mutex);
}
/*
 * Emit a descriptors event for every perf counter agent not yet described
 * in the log.  Caller must hold counters_mutex.  Two passes: size, then
 * emit; they must stay in sync.
 */
static void
perfcounters_emit (void)
{
	PerfCounterAgent *pcagent;
	int len = 0;
	int size =
		EVENT_SIZE /* event */ +
		LEB128_SIZE /* len */
	;

	/* Pass 1: size the event and count pending descriptors. */
	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->emitted)
			continue;

		size +=
			LEB128_SIZE /* section */ +
			strlen (pcagent->category_name) + 1 /* category name */ +
			strlen (pcagent->name) + 1 /* name */ +
			BYTE_SIZE /* type */ +
			BYTE_SIZE /* unit */ +
			BYTE_SIZE /* variance */ +
			LEB128_SIZE /* index */
		;

		len++;
	}

	if (!len)
		return;

	ENTER_LOG (&perfcounter_descriptors_ctr, logbuffer, size);

	emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
	emit_value (logbuffer, len);

	/* Pass 2: emit the descriptors; perf counters are always described
	 * as raw, variable 64-bit values. */
	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->emitted)
			continue;

		emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
		emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
		emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
		emit_byte (logbuffer, MONO_COUNTER_LONG);
		emit_byte (logbuffer, MONO_COUNTER_RAW);
		emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
		emit_value (logbuffer, pcagent->index);

		pcagent->emitted = TRUE;
	}

	EXIT_LOG;
}
2937 static gboolean
2938 perfcounters_foreach (char *category_name, char *name, unsigned char type, gint64 value, gpointer user_data)
2940 PerfCounterAgent *pcagent;
2942 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2943 if (strcmp (pcagent->category_name, category_name) != 0 || strcmp (pcagent->name, name) != 0)
2944 continue;
2945 if (pcagent->value == value)
2946 return TRUE;
2948 pcagent->value = value;
2949 pcagent->updated = TRUE;
2950 pcagent->deleted = FALSE;
2951 return TRUE;
2954 pcagent = g_new0 (PerfCounterAgent, 1);
2955 pcagent->next = log_profiler.perfcounters;
2956 pcagent->index = log_profiler.counters_index++;
2957 pcagent->category_name = g_strdup (category_name);
2958 pcagent->name = g_strdup (name);
2959 pcagent->value = value;
2960 pcagent->emitted = FALSE;
2961 pcagent->updated = TRUE;
2962 pcagent->deleted = FALSE;
2964 log_profiler.perfcounters = pcagent;
2966 return TRUE;
/*
 * Refresh all perf counters and emit one TYPE_SAMPLE_COUNTERS event with
 * the values of counters that changed since the last sample.
 */
static void
perfcounters_sample (uint64_t timestamp)
{
	PerfCounterAgent *pcagent;
	int len = 0;
	int size;

	mono_os_mutex_lock (&log_profiler.counters_mutex);

	/* mark all perfcounters as deleted, foreach will unmark them as necessary */
	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next)
		pcagent->deleted = TRUE;

	mono_perfcounter_foreach (perfcounters_foreach, NULL);

	/* Describe any counters discovered by the foreach above. */
	perfcounters_emit ();

	/* Size pass: only live, updated counters are emitted. */
	size =
		EVENT_SIZE /* event */
	;

	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->deleted || !pcagent->updated)
			continue;

		size +=
			LEB128_SIZE /* index */ +
			BYTE_SIZE /* type */ +
			LEB128_SIZE /* value */
		;

		len++;
	}

	if (!len)
		goto done;

	size +=
		LEB128_SIZE /* stop marker */
	;

	ENTER_LOG (&perfcounter_samples_ctr, logbuffer, size);

	emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);

	/* Emit pass: must select exactly the same agents as the size pass. */
	for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
		if (pcagent->deleted || !pcagent->updated)
			continue;
		emit_uvalue (logbuffer, pcagent->index);
		emit_byte (logbuffer, MONO_COUNTER_LONG);
		emit_svalue (logbuffer, pcagent->value);

		pcagent->updated = FALSE;
	}

	/* Stop marker. */
	emit_value (logbuffer, 0);

	EXIT_LOG;

done:
	mono_os_mutex_unlock (&log_profiler.counters_mutex);
}
/* Take one timestamped sample of both runtime counters and perf counters. */
static void
counters_and_perfcounters_sample (void)
{
	uint64_t timestamp = current_time ();

	counters_sample (timestamp);
	perfcounters_sample (timestamp);
}
/* Lock-free queue node carrying one method for coverage bookkeeping. */
typedef struct {
	MonoLockFreeQueueNode node;
	MonoMethod *method;
} MethodNode;

/* One coverage statement record gathered by obtain_coverage_for_method (). */
typedef struct {
	int offset;	/* IL offset, delta-encoded against the previous entry */
	int counter;	/* hit counter value */
	char *filename;	/* owned copy of the source file name ("" if unknown) */
	int line;
	int column;
} CoverageEntry;
3054 static void
3055 free_coverage_entry (gpointer data, gpointer userdata)
3057 CoverageEntry *entry = (CoverageEntry *)data;
3058 g_free (entry->filename);
3059 g_free (entry);
3062 static void
3063 obtain_coverage_for_method (MonoProfiler *prof, const MonoProfilerCoverageData *entry)
3065 int offset = entry->il_offset - log_profiler.coverage_previous_offset;
3066 CoverageEntry *e = g_new (CoverageEntry, 1);
3068 log_profiler.coverage_previous_offset = entry->il_offset;
3070 e->offset = offset;
3071 e->counter = entry->counter;
3072 e->filename = g_strdup(entry->file_name ? entry->file_name : "");
3073 e->line = entry->line;
3074 e->column = entry->column;
3076 g_ptr_array_add (log_profiler.coverage_data, e);
/*
 * Rewrite a runtime generic type name like "Foo&lt;T,K&gt;" into the
 * metadata-style form "Foo`2"; a bare "<>" pair (compiler-generated names)
 * becomes the literal text "&lt;&gt;".  Returns a newly allocated string
 * the caller must g_free (), a g_strdup ()ed "" for NULL/empty input, or
 * NULL if allocation fails.
 */
static char *
parse_generic_type_names(char *name)
{
	char *new_name, *ret;
	int within_generic_declaration = 0, generic_members = 1;
	char prev_char = '\0';

	if (name == NULL || *name == '\0')
		return g_strdup ("");

	/* Worst-case growth: "<>" (2 chars) expands to "&lt;&gt;" (8 chars). */
	if (!(ret = new_name = (char *) g_calloc (strlen (name) * 4 + 1, sizeof (char))))
		return NULL;

	do {
		switch (*name) {
		case '<':
			within_generic_declaration = 1;
			break;

		case '>':
			within_generic_declaration = 0;

			/*
			 * Use the tracked previous character instead of reading
			 * name [-1], which was an out-of-bounds read when the
			 * string started with '>'.
			 */
			if (prev_char != '<') {
				/* NOTE(review): more than 9 type parameters would
				 * emit a non-digit character here. */
				*new_name++ = '`';
				*new_name++ = '0' + generic_members;
			} else {
				memcpy (new_name, "&lt;&gt;", 8);
				new_name += 8;
			}

			generic_members = 0;
			break;

		case ',':
			generic_members++;
			break;

		default:
			if (!within_generic_declaration)
				*new_name++ = *name;

			break;
		}

		prev_char = *name;
	} while (*name++);

	return ret;
}
/*
 * Hashtable-foreach callback: gather the coverage data recorded for one
 * method and emit a method event followed by one statement event per
 * covered statement.
 */
static void
build_method_buffer (gpointer key, gpointer value, gpointer userdata)
{
	MonoMethod *method = (MonoMethod *)value;
	MonoClass *klass;
	MonoImage *image;
	char *class_name;
	const char *image_name, *method_name, *sig, *first_filename;
	guint i;

	/* Reset per-method state used by obtain_coverage_for_method (). */
	log_profiler.coverage_previous_offset = 0;
	log_profiler.coverage_data = g_ptr_array_new ();

	mono_profiler_get_coverage_data (log_profiler.handle, method, obtain_coverage_for_method);

	klass = mono_method_get_class (method);
	image = mono_class_get_image (klass);
	image_name = mono_image_get_name (image);

	/* NOTE(review): sig from mono_signature_get_desc () appears to be a
	 * heap-allocated string that is never freed here — verify. */
	sig = mono_signature_get_desc (mono_method_signature (method), TRUE);
	class_name = parse_generic_type_names (mono_type_get_name (mono_class_get_type (klass)));
	method_name = mono_method_get_name (method);

	if (log_profiler.coverage_data->len != 0) {
		CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata[0];
		first_filename = entry->filename ? entry->filename : "";
	} else
		first_filename = "";

	/* Normalize possible NULLs so the strlen () calls below are safe. */
	image_name = image_name ? image_name : "";
	sig = sig ? sig : "";
	method_name = method_name ? method_name : "";

	ENTER_LOG (&coverage_methods_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		strlen (image_name) + 1 /* image name */ +
		strlen (class_name) + 1 /* class name */ +
		strlen (method_name) + 1 /* method name */ +
		strlen (sig) + 1 /* signature */ +
		strlen (first_filename) + 1 /* first file name */ +
		LEB128_SIZE /* token */ +
		LEB128_SIZE /* method id */ +
		LEB128_SIZE /* entries */
	);

	emit_event (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
	emit_string (logbuffer, image_name, strlen (image_name) + 1);
	emit_string (logbuffer, class_name, strlen (class_name) + 1);
	emit_string (logbuffer, method_name, strlen (method_name) + 1);
	emit_string (logbuffer, sig, strlen (sig) + 1);
	emit_string (logbuffer, first_filename, strlen (first_filename) + 1);

	emit_uvalue (logbuffer, mono_method_get_token (method));
	emit_uvalue (logbuffer, log_profiler.coverage_method_id);
	emit_value (logbuffer, log_profiler.coverage_data->len);

	EXIT_LOG;

	/* One statement event per recorded coverage entry. */
	for (i = 0; i < log_profiler.coverage_data->len; i++) {
		CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata[i];

		ENTER_LOG (&coverage_statements_ctr, logbuffer,
			EVENT_SIZE /* event */ +
			LEB128_SIZE /* method id */ +
			LEB128_SIZE /* offset */ +
			LEB128_SIZE /* counter */ +
			LEB128_SIZE /* line */ +
			LEB128_SIZE /* column */
		);

		emit_event (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
		emit_uvalue (logbuffer, log_profiler.coverage_method_id);
		emit_uvalue (logbuffer, entry->offset);
		emit_uvalue (logbuffer, entry->counter);
		emit_uvalue (logbuffer, entry->line);
		emit_uvalue (logbuffer, entry->column);

		EXIT_LOG;
	}

	log_profiler.coverage_method_id++;

	g_free (class_name);

	g_ptr_array_foreach (log_profiler.coverage_data, free_coverage_entry, NULL);
	g_ptr_array_free (log_profiler.coverage_data, TRUE);
}
3214 /* This empties the queue */
3215 static guint
3216 count_queue (MonoLockFreeQueue *queue)
3218 MonoLockFreeQueueNode *node;
3219 guint count = 0;
3221 while ((node = mono_lock_free_queue_dequeue (queue))) {
3222 count++;
3223 mono_thread_hazardous_try_free (node, g_free);
3226 return count;
/*
 * Hashtable-foreach callback: emit one coverage-class event summarizing
 * how many of a class's methods were fully covered.
 */
static void
build_class_buffer (gpointer key, gpointer value, gpointer userdata)
{
	MonoClass *klass = (MonoClass *)key;
	MonoLockFreeQueue *class_methods = (MonoLockFreeQueue *)value;
	MonoImage *image;
	char *class_name;
	const char *assembly_name;
	int number_of_methods, partially_covered;
	guint fully_covered;

	image = mono_class_get_image (klass);
	assembly_name = mono_image_get_name (image);
	class_name = mono_type_get_name (mono_class_get_type (klass));

	assembly_name = assembly_name ? assembly_name : "";
	number_of_methods = mono_class_num_methods (klass);
	/* count_queue () drains the per-class method queue. */
	fully_covered = count_queue (class_methods);
	/* We don't handle partially covered yet */
	partially_covered = 0;

	ENTER_LOG (&coverage_classes_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		strlen (assembly_name) + 1 /* assembly name */ +
		strlen (class_name) + 1 /* class name */ +
		LEB128_SIZE /* no. methods */ +
		LEB128_SIZE /* fully covered */ +
		LEB128_SIZE /* partially covered */
	);

	emit_event (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
	emit_string (logbuffer, assembly_name, strlen (assembly_name) + 1);
	emit_string (logbuffer, class_name, strlen (class_name) + 1);
	emit_uvalue (logbuffer, number_of_methods);
	emit_uvalue (logbuffer, fully_covered);
	emit_uvalue (logbuffer, partially_covered);

	EXIT_LOG;

	g_free (class_name);
}
3271 static void
3272 get_coverage_for_image (MonoImage *image, int *number_of_methods, guint *fully_covered, int *partially_covered)
3274 MonoLockFreeQueue *image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);
3276 *number_of_methods = mono_image_get_table_rows (image, MONO_TABLE_METHOD);
3277 if (image_methods)
3278 *fully_covered = count_queue (image_methods);
3279 else
3280 *fully_covered = 0;
3282 // FIXME: We don't handle partially covered yet.
3283 *partially_covered = 0;
/*
 * Hashtable-foreach callback: emit one coverage-assembly event summarizing
 * method coverage for a tracked assembly.
 */
static void
build_assembly_buffer (gpointer key, gpointer value, gpointer userdata)
{
	MonoAssembly *assembly = (MonoAssembly *)value;
	MonoImage *image = mono_assembly_get_image (assembly);
	const char *name, *guid, *filename;
	int number_of_methods = 0, partially_covered = 0;
	guint fully_covered = 0;

	name = mono_image_get_name (image);
	guid = mono_image_get_guid (image);
	filename = mono_image_get_filename (image);

	/* Normalize possible NULLs so the strlen () calls below are safe. */
	name = name ? name : "";
	guid = guid ? guid : "";
	filename = filename ? filename : "";

	get_coverage_for_image (image, &number_of_methods, &fully_covered, &partially_covered);

	ENTER_LOG (&coverage_assemblies_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		strlen (name) + 1 /* name */ +
		strlen (guid) + 1 /* guid */ +
		strlen (filename) + 1 /* file name */ +
		LEB128_SIZE /* no. methods */ +
		LEB128_SIZE /* fully covered */ +
		LEB128_SIZE /* partially covered */
	);

	emit_event (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
	emit_string (logbuffer, name, strlen (name) + 1);
	emit_string (logbuffer, guid, strlen (guid) + 1);
	emit_string (logbuffer, filename, strlen (filename) + 1);
	emit_uvalue (logbuffer, number_of_methods);
	emit_uvalue (logbuffer, fully_covered);
	emit_uvalue (logbuffer, partially_covered);

	EXIT_LOG;
}
/* Write all gathered coverage data (assemblies, classes, methods) to the log. */
static void
dump_coverage (void)
{
	mono_os_mutex_lock (&log_profiler.coverage_mutex);
	mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, build_assembly_buffer, NULL);
	mono_conc_hashtable_foreach (log_profiler.coverage_classes, build_class_buffer, NULL);
	mono_conc_hashtable_foreach (log_profiler.coverage_methods, build_method_buffer, NULL);
	mono_os_mutex_unlock (&log_profiler.coverage_mutex);
}
3336 static MonoLockFreeQueueNode *
3337 create_method_node (MonoMethod *method)
3339 MethodNode *node = (MethodNode *) g_malloc (sizeof (MethodNode));
3340 mono_lock_free_queue_node_init ((MonoLockFreeQueueNode *) node, FALSE);
3341 node->method = method;
3343 return (MonoLockFreeQueueNode *) node;
/*
 * Profiler coverage-filter callback: decide whether `method` should be
 * instrumented for coverage, applying the suppression list and the user's
 * +/- filters, and registering accepted methods in the coverage tables.
 * Returns TRUE to instrument the method.
 */
static gboolean
coverage_filter (MonoProfiler *prof, MonoMethod *method)
{
	MonoError error;
	MonoClass *klass;
	MonoImage *image;
	MonoAssembly *assembly;
	MonoMethodHeader *header;
	guint32 iflags, flags, code_size;
	char *fqn, *classname;
	gboolean has_positive, found;
	MonoLockFreeQueue *image_methods, *class_methods;
	MonoLockFreeQueueNode *node;

	/* Icalls and P/Invokes have no IL body to cover. */
	flags = mono_method_get_flags (method, &iflags);
	if ((iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
	    (flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
		return FALSE;

	// Don't need to do anything else if we're already tracking this method
	if (mono_conc_hashtable_lookup (log_profiler.coverage_methods, method))
		return TRUE;

	klass = mono_method_get_class (method);
	image = mono_class_get_image (klass);

	// Don't handle coverage for the core assemblies
	if (mono_conc_hashtable_lookup (log_profiler.coverage_suppressed_assemblies, (gpointer) mono_image_get_name (image)) != NULL)
		return FALSE;

	if (prof->coverage_filters) {
		/* Check already filtered classes first */
		if (mono_conc_hashtable_lookup (log_profiler.coverage_filtered_classes, klass))
			return FALSE;

		classname = mono_type_get_name (mono_class_get_type (klass));

		/* Filters match against "[assembly]Namespace.Class". */
		fqn = g_strdup_printf ("[%s]%s", mono_image_get_name (image), classname);

		// Check positive filters first
		has_positive = FALSE;
		found = FALSE;
		for (guint i = 0; i < prof->coverage_filters->len; ++i) {
			char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);

			if (filter [0] == '+') {
				filter = &filter [1];

				if (strstr (fqn, filter) != NULL)
					found = TRUE;

				has_positive = TRUE;
			}
		}

		/* With positive filters present, only matching classes pass. */
		if (has_positive && !found) {
			mono_os_mutex_lock (&log_profiler.coverage_mutex);
			mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
			mono_os_mutex_unlock (&log_profiler.coverage_mutex);
			g_free (fqn);
			g_free (classname);

			return FALSE;
		}

		/* Negative filters reject matching classes. */
		for (guint i = 0; i < prof->coverage_filters->len; ++i) {
			// FIXME: Is substring search sufficient?
			char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
			if (filter [0] == '+')
				continue;

			// Skip '-'
			filter = &filter [1];

			if (strstr (fqn, filter) != NULL) {
				mono_os_mutex_lock (&log_profiler.coverage_mutex);
				mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
				mono_os_mutex_unlock (&log_profiler.coverage_mutex);
				g_free (fqn);
				g_free (classname);

				return FALSE;
			}
		}

		g_free (fqn);
		g_free (classname);
	}

	/* NOTE(review): header is not NULL-checked before use and does not
	 * appear to be freed — verify against the runtime's header API. */
	header = mono_method_get_header_checked (method, &error);
	mono_error_cleanup (&error);

	mono_method_header_get_code (header, &code_size, NULL);

	assembly = mono_image_get_assembly (image);

	// Need to keep the assemblies around for as long as they are kept in the hashtable
	// Nunit, for example, has a habit of unloading them before the coverage statistics are
	// generated causing a crash. See https://bugzilla.xamarin.com/show_bug.cgi?id=39325
	mono_assembly_addref (assembly);

	mono_os_mutex_lock (&log_profiler.coverage_mutex);
	mono_conc_hashtable_insert (log_profiler.coverage_methods, method, method);
	mono_conc_hashtable_insert (log_profiler.coverage_assemblies, assembly, assembly);
	mono_os_mutex_unlock (&log_profiler.coverage_mutex);

	image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);

	/* Lazily create the per-image method queue. */
	if (image_methods == NULL) {
		image_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
		mono_lock_free_queue_init (image_methods);
		mono_os_mutex_lock (&log_profiler.coverage_mutex);
		mono_conc_hashtable_insert (log_profiler.coverage_image_to_methods, image, image_methods);
		mono_os_mutex_unlock (&log_profiler.coverage_mutex);
	}

	node = create_method_node (method);
	mono_lock_free_queue_enqueue (image_methods, node);

	class_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_classes, klass);

	/* Lazily create the per-class method queue. */
	if (class_methods == NULL) {
		class_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
		mono_lock_free_queue_init (class_methods);
		mono_os_mutex_lock (&log_profiler.coverage_mutex);
		mono_conc_hashtable_insert (log_profiler.coverage_classes, klass, class_methods);
		mono_os_mutex_unlock (&log_profiler.coverage_mutex);
	}

	node = create_method_node (method);
	mono_lock_free_queue_enqueue (class_methods, node);

	return TRUE;
}
#define LINE_BUFFER_SIZE 4096
/* Max file limit of 128KB */
#define MAX_FILE_SIZE 128 * 1024
/*
 * Read a whole seekable stream into a newly g_malloc ()ed, NUL-terminated
 * buffer owned by the caller.  Returns NULL on seek/tell failure or when
 * the file exceeds MAX_FILE_SIZE.
 */
static char *
get_file_content (FILE *stream)
{
	char *buffer;
	ssize_t bytes_read;
	long filesize;
	int res, offset = 0;

	res = fseek (stream, 0, SEEK_END);
	if (res < 0)
		return NULL;

	filesize = ftell (stream);
	if (filesize < 0)
		return NULL;

	res = fseek (stream, 0, SEEK_SET);
	if (res < 0)
		return NULL;

	if (filesize > MAX_FILE_SIZE)
		return NULL;

	buffer = (char *) g_malloc ((filesize + 1) * sizeof (char));

	/*
	 * Cap every read to the space remaining in the buffer: the previous
	 * unbounded fread () loop could write past the allocation if the
	 * file grew between ftell () and the reads.
	 */
	while (offset < filesize) {
		size_t chunk = (size_t) (filesize - offset);

		if (chunk > LINE_BUFFER_SIZE)
			chunk = LINE_BUFFER_SIZE;

		bytes_read = (ssize_t) fread (buffer + offset, 1, chunk, stream);

		if (bytes_read <= 0)
			break;

		offset += bytes_read;
	}

	/* NUL terminate right after the bytes actually read; terminating at
	 * filesize could leave uninitialized bytes on a short read. */
	buffer [offset] = '\0';
	return buffer;
}
/*
 * Split one line off a NUL-terminated buffer, in place: the newline (if
 * any) is overwritten with '\0' and *next_start points just past it, or
 * is NULL when no more lines follow.  Returns the start of the line, or
 * NULL for a NULL/empty buffer.
 */
static char *
get_next_line (char *contents, char **next_start)
{
	char *newline;

	if (contents == NULL || *contents == '\0') {
		*next_start = NULL;
		return NULL;
	}

	newline = strchr (contents, '\n');

	if (newline != NULL) {
		*newline = '\0';
		*next_start = newline + 1;
	} else {
		*next_start = NULL;
	}

	return contents;
}
/*
 * Build the set of assemblies excluded from coverage from the optional
 * suppression file.  Missing file means nothing is suppressed.
 */
static void
init_suppressed_assemblies (void)
{
	char *content;
	char *line;
	FILE *sa_file;

	log_profiler.coverage_suppressed_assemblies = mono_conc_hashtable_new (g_str_hash, g_str_equal);
	sa_file = fopen (SUPPRESSION_DIR "/mono-profiler-log.suppression", "r");
	if (sa_file == NULL)
		return;

	/* Don't need to free @content as it is referred to by the lines stored in @suppressed_assemblies */
	content = get_file_content (sa_file);
	if (content == NULL)
		g_error ("mono-profiler-log.suppression is greater than 128kb - aborting.");

	while ((line = get_next_line (content, &content))) {
		line = g_strchomp (g_strchug (line));
		/* No locking needed as we're doing initialization */
		mono_conc_hashtable_insert (log_profiler.coverage_suppressed_assemblies, line, line);
	}

	fclose (sa_file);
}
3564 static void
3565 parse_cov_filter_file (GPtrArray *filters, const char *file)
3567 FILE *filter_file = fopen (file, "r");
3569 if (filter_file == NULL) {
3570 mono_profiler_printf_err ("Could not open coverage filter file '%s'.", file);
3571 return;
3574 /* Don't need to free content as it is referred to by the lines stored in @filters */
3575 char *content = get_file_content (filter_file);
3577 if (content == NULL)
3578 mono_profiler_printf_err ("Coverage filter file '%s' is larger than 128kb - ignoring.", file);
3580 char *line;
3582 while ((line = get_next_line (content, &content)))
3583 g_ptr_array_add (filters, g_strchug (g_strchomp (line)));
3585 fclose (filter_file);
/*
 * Set up all coverage-tracking state: the mutex guarding the coverage hash
 * tables, the tables themselves, and the on-disk assembly suppression list.
 */
static void
coverage_init (void)
{
	mono_os_mutex_init (&log_profiler.coverage_mutex);

	/* All tables key on raw pointers, hence the NULL hash/equality functions. */
	log_profiler.coverage_methods = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_assemblies = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_classes = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_filtered_classes = mono_conc_hashtable_new (NULL, NULL);
	log_profiler.coverage_image_to_methods = mono_conc_hashtable_new (NULL, NULL);

	/* Loads SUPPRESSION_DIR/mono-profiler-log.suppression, if present. */
	init_suppressed_assemblies ();
}
3600 static void
3601 unref_coverage_assemblies (gpointer key, gpointer value, gpointer userdata)
3603 MonoAssembly *assembly = (MonoAssembly *)value;
3604 mono_assembly_close (assembly);
3607 static void
3608 free_sample_hit (gpointer p)
3610 mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
3613 static void
3614 cleanup_reusable_samples (void)
3616 SampleHit *sample;
3618 while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.sample_reuse_queue)))
3619 mono_thread_hazardous_try_free (sample, free_sample_hit);
3622 static void
3623 log_early_shutdown (MonoProfiler *prof)
3625 if (log_config.hs_on_shutdown) {
3626 mono_atomic_store_i32 (&log_profiler.heapshot_requested, 1);
3627 mono_gc_collect (mono_gc_max_generation ());
/*
 * Final profiler shutdown, invoked by the runtime. Tears state down in
 * dependency order: flush remaining events, stop the helper, dumper and
 * writer threads, release per-thread and queued state, close the output
 * stream, and destroy the coverage machinery. The ordering here is
 * load-bearing; see the inline comments.
 */
static void
log_shutdown (MonoProfiler *prof)
{
	/* Tell the event handlers we are shutting down. */
	mono_atomic_store_i32 (&log_profiler.in_shutdown, 1);

	/* Emit one last counter sample so final values reach the log. */
	if (ENABLED (PROFLOG_COUNTER_EVENTS))
		counters_and_perfcounters_sample ();

	if (log_config.collect_coverage)
		dump_coverage ();

	char c = 1;

	/* Wake the helper thread's select () loop so it can exit. */
	if (write (prof->pipes [1], &c, 1) != 1) {
		mono_profiler_printf_err ("Could not write to log profiler pipe: %s", g_strerror (errno));
		exit (1);
	}

	mono_native_thread_join (prof->helper_thread);

	mono_os_mutex_destroy (&log_profiler.counters_mutex);

	/* Free the singly-linked counter agent lists. */
	MonoCounterAgent *mc_next;

	for (MonoCounterAgent *cur = log_profiler.counters; cur; cur = mc_next) {
		mc_next = cur->next;
		g_free (cur);
	}

	PerfCounterAgent *pc_next;

	for (PerfCounterAgent *cur = log_profiler.perfcounters; cur; cur = pc_next) {
		pc_next = cur->next;
		g_free (cur);
	}

	/*
	 * Ensure that we empty the LLS completely, even if some nodes are
	 * not immediately removed upon calling mono_lls_remove (), by
	 * iterating until the head is NULL.
	 */
	while (log_profiler.profiler_thread_list.head) {
		MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
			g_assert (thread->attached && "Why is a thread in the LLS not attached?");

			remove_thread (thread);
		} MONO_LLS_FOREACH_SAFE_END
	}

	/*
	 * Ensure that all threads have been freed, so that we don't miss any
	 * buffers when we shut down the writer thread below.
	 */
	mono_thread_hazardous_try_free_all ();

	/*
	 * Stop the dumper thread before the writer thread -- presumably because
	 * the dumper can still produce buffers for the writer; confirm before
	 * reordering.
	 */
	mono_atomic_store_i32 (&prof->run_dumper_thread, 0);
	mono_os_sem_post (&prof->dumper_queue_sem);
	mono_native_thread_join (prof->dumper_thread);
	mono_os_sem_destroy (&prof->dumper_queue_sem);

	mono_atomic_store_i32 (&prof->run_writer_thread, 0);
	mono_os_sem_post (&prof->writer_queue_sem);
	mono_native_thread_join (prof->writer_thread);
	mono_os_sem_destroy (&prof->writer_queue_sem);

	/*
	 * Free all writer queue entries, and ensure that all sample hits will be
	 * added to the sample reuse queue.
	 */
	mono_thread_hazardous_try_free_all ();

	cleanup_reusable_samples ();

	/*
	 * Finally, make sure that all sample hits are freed. This should cover all
	 * hazardous data from the profiler. We can now be sure that the runtime
	 * won't later invoke free functions in the profiler library after it has
	 * been unloaded.
	 */
	mono_thread_hazardous_try_free_all ();

	gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);

	/* Low 16 bits hold the reader count, high 16 bits the exclusive flag. */
	g_assert (!(state & 0xFFFF) && "Why is the reader count still non-zero?");
	g_assert (!(state >> 16) && "Why is the exclusive lock still held?");

#if defined (HAVE_SYS_ZLIB)
	if (prof->gzfile)
		gzclose (prof->gzfile);
#endif
	/* The output was opened with either popen () or fopen (); close accordingly. */
	if (prof->pipe_output)
		pclose (prof->file);
	else
		fclose (prof->file);

	mono_conc_hashtable_destroy (prof->method_table);
	mono_os_mutex_destroy (&prof->method_table_mutex);

	if (log_config.collect_coverage) {
		/* Drop the assembly references taken while recording coverage. */
		mono_os_mutex_lock (&log_profiler.coverage_mutex);
		mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, unref_coverage_assemblies, NULL);
		mono_os_mutex_unlock (&log_profiler.coverage_mutex);

		mono_conc_hashtable_destroy (log_profiler.coverage_methods);
		mono_conc_hashtable_destroy (log_profiler.coverage_assemblies);
		mono_conc_hashtable_destroy (log_profiler.coverage_classes);
		mono_conc_hashtable_destroy (log_profiler.coverage_filtered_classes);

		mono_conc_hashtable_destroy (log_profiler.coverage_image_to_methods);
		mono_conc_hashtable_destroy (log_profiler.coverage_suppressed_assemblies);
		mono_os_mutex_destroy (&log_profiler.coverage_mutex);
	}

	mono_coop_mutex_destroy (&log_profiler.api_mutex);

	PROF_TLS_FREE ();

	g_free (prof->args);
}
/*
 * Expand the output filename template. Supported directives:
 *   %t -> UTC timestamp (YYYYMMDDhhmmss, via gmtime)
 *   %p -> process id
 *   %% -> literal '%'
 * Any other character after '%' is copied through verbatim. If no %t/%p
 * directive is present, a plain copy of @filename is returned. The returned
 * string is newly allocated and owned by the caller.
 */
static char*
new_filename (const char* filename)
{
	time_t t = time (NULL);
	int pid = process_id ();
	char pid_buf [16];
	char time_buf [16];
	char *res, *d;
	const char *p;
	int count_dates = 0;
	int count_pids = 0;
	int s_date, s_pid;
	struct tm *ts;

	/* First pass: count %t/%p directives to size the result buffer. */
	for (p = filename; *p; p++) {
		if (*p != '%')
			continue;
		p++;
		if (*p == 't')
			count_dates++;
		else if (*p == 'p')
			count_pids++;
		else if (*p == 0)
			break;
	}

	/* Fast path: no substitution needed. */
	if (!count_dates && !count_pids)
		return pstrdup (filename);

	snprintf (pid_buf, sizeof (pid_buf), "%d", pid);
	/* Non-reentrant gmtime () -- presumably fine since this runs once at startup. */
	ts = gmtime (&t);
	snprintf (time_buf, sizeof (time_buf), "%d%02d%02d%02d%02d%02d",
		1900 + ts->tm_year, 1 + ts->tm_mon, ts->tm_mday, ts->tm_hour, ts->tm_min, ts->tm_sec);
	s_date = strlen (time_buf);
	s_pid = strlen (pid_buf);

	/*
	 * Sizing: each directive occupies two bytes ('%' + letter) already
	 * counted by strlen (filename) but is replaced by s_date/s_pid bytes;
	 * the two bytes freed per directive also cover the NUL terminator.
	 */
	d = res = (char *) g_malloc (strlen (filename) + s_date * count_dates + s_pid * count_pids);

	/* Second pass: copy, expanding directives as we go. */
	for (p = filename; *p; p++) {
		if (*p != '%') {
			*d++ = *p;
			continue;
		}
		p++;
		if (*p == 't') {
			strcpy (d, time_buf);
			d += s_date;
			continue;
		} else if (*p == 'p') {
			strcpy (d, pid_buf);
			d += s_pid;
			continue;
		} else if (*p == '%') {
			*d++ = '%';
			continue;
		} else if (*p == 0)
			break;
		/* Unknown directive: emit it verbatim. */
		*d++ = '%';
		*d++ = *p;
	}
	*d = 0;

	return res;
}
/*
 * Add @fd to @set and keep @max_fd tracking the highest descriptor seen, as
 * required by select (). Exits the process if @fd cannot fit in an fd_set.
 */
static void
add_to_fd_set (fd_set *set, int fd, int *max_fd)
{
	/*
	 * This should only trigger for the basic FDs (server socket, pipes) at
	 * startup if for some mysterious reason they're too large. In this case,
	 * the profiler really can't function, and we're better off printing an
	 * error and exiting.
	 */
	if (fd >= FD_SETSIZE) {
		mono_profiler_printf_err ("File descriptor is out of bounds for fd_set: %d", fd);
		exit (1);
	}

	FD_SET (fd, set);

	*max_fd = MAX (*max_fd, fd);
}
3830 static void *
3831 helper_thread (void *arg)
3833 mono_threads_attach_tools_thread ();
3834 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
3836 MonoProfilerThread *thread = init_thread (FALSE);
3838 GArray *command_sockets = g_array_new (FALSE, FALSE, sizeof (int));
3840 while (1) {
3841 fd_set rfds;
3842 int max_fd = -1;
3844 FD_ZERO (&rfds);
3846 add_to_fd_set (&rfds, log_profiler.server_socket, &max_fd);
3847 add_to_fd_set (&rfds, log_profiler.pipes [0], &max_fd);
3849 for (gint i = 0; i < command_sockets->len; i++)
3850 add_to_fd_set (&rfds, g_array_index (command_sockets, int, i), &max_fd);
3852 struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
3854 // Sleep for 1sec or until a file descriptor has data.
3855 if (select (max_fd + 1, &rfds, NULL, NULL, &tv) == -1) {
3856 if (errno == EINTR)
3857 continue;
3859 mono_profiler_printf_err ("Could not poll in log profiler helper thread: %s", g_strerror (errno));
3860 exit (1);
3863 if (ENABLED (PROFLOG_COUNTER_EVENTS))
3864 counters_and_perfcounters_sample ();
3866 buffer_lock_excl ();
3868 sync_point (SYNC_POINT_PERIODIC);
3870 buffer_unlock_excl ();
3872 // Are we shutting down?
3873 if (FD_ISSET (log_profiler.pipes [0], &rfds)) {
3874 char c;
3875 read (log_profiler.pipes [0], &c, 1);
3876 break;
3879 for (gint i = 0; i < command_sockets->len; i++) {
3880 int fd = g_array_index (command_sockets, int, i);
3882 if (!FD_ISSET (fd, &rfds))
3883 continue;
3885 char buf [64];
3886 int len = read (fd, buf, sizeof (buf) - 1);
3888 if (len == -1)
3889 continue;
3891 if (!len) {
3892 // The other end disconnected.
3893 g_array_remove_index (command_sockets, i);
3894 close (fd);
3896 continue;
3899 buf [len] = 0;
3901 if (!strcmp (buf, "heapshot\n"))
3902 trigger_heapshot ();
3905 if (FD_ISSET (log_profiler.server_socket, &rfds)) {
3906 int fd = accept (log_profiler.server_socket, NULL, NULL);
3908 if (fd != -1) {
3909 if (fd >= FD_SETSIZE)
3910 close (fd);
3911 else
3912 g_array_append_val (command_sockets, fd);
3917 for (gint i = 0; i < command_sockets->len; i++)
3918 close (g_array_index (command_sockets, int, i));
3920 g_array_free (command_sockets, TRUE);
3922 send_log_unsafe (FALSE);
3923 deinit_thread (thread);
3925 mono_thread_info_detach ();
3927 return NULL;
3930 static void
3931 start_helper_thread (void)
3933 if (pipe (log_profiler.pipes) == -1) {
3934 mono_profiler_printf_err ("Could not create log profiler pipe: %s", g_strerror (errno));
3935 exit (1);
3938 log_profiler.server_socket = socket (PF_INET, SOCK_STREAM, 0);
3940 if (log_profiler.server_socket == -1) {
3941 mono_profiler_printf_err ("Could not create log profiler server socket: %s", g_strerror (errno));
3942 exit (1);
3945 struct sockaddr_in server_address;
3947 memset (&server_address, 0, sizeof (server_address));
3948 server_address.sin_family = AF_INET;
3949 server_address.sin_addr.s_addr = INADDR_ANY;
3950 server_address.sin_port = htons (log_profiler.command_port);
3952 if (bind (log_profiler.server_socket, (struct sockaddr *) &server_address, sizeof (server_address)) == -1) {
3953 mono_profiler_printf_err ("Could not bind log profiler server socket on port %d: %s", log_profiler.command_port, g_strerror (errno));
3954 close (log_profiler.server_socket);
3955 exit (1);
3958 if (listen (log_profiler.server_socket, 1) == -1) {
3959 mono_profiler_printf_err ("Could not listen on log profiler server socket: %s", g_strerror (errno));
3960 close (log_profiler.server_socket);
3961 exit (1);
3964 socklen_t slen = sizeof (server_address);
3966 if (getsockname (log_profiler.server_socket, (struct sockaddr *) &server_address, &slen)) {
3967 mono_profiler_printf_err ("Could not retrieve assigned port for log profiler server socket: %s", g_strerror (errno));
3968 close (log_profiler.server_socket);
3969 exit (1);
3972 log_profiler.command_port = ntohs (server_address.sin_port);
3974 if (!mono_native_thread_create (&log_profiler.helper_thread, helper_thread, NULL)) {
3975 mono_profiler_printf_err ("Could not start log profiler helper thread");
3976 close (log_profiler.server_socket);
3977 exit (1);
3981 static void
3982 free_writer_entry (gpointer p)
3984 mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
/*
 * Dequeue and process one entry from the writer queue. Any method metadata
 * the entry carries is encoded and flushed to disk *before* the entry's main
 * buffer, so the log stream never references a method whose metadata hasn't
 * been emitted yet. Returns TRUE if an entry was processed, FALSE when the
 * queue was empty.
 */
static gboolean
handle_writer_queue_entry (void)
{
	WriterQueueEntry *entry;

	if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&log_profiler.writer_queue))) {
		if (!entry->methods)
			goto no_methods;

		gboolean wrote_methods = FALSE;

		/*
		 * Encode the method events in a temporary log buffer that we
		 * flush to disk before the main buffer, ensuring that all
		 * methods have metadata emitted before they're referenced.
		 *
		 * We use a 'proper' thread-local buffer for this as opposed
		 * to allocating and freeing a buffer by hand because the call
		 * to mono_method_full_name () below may trigger class load
		 * events when it retrieves the signature of the method. So a
		 * thread-local buffer needs to exist when such events occur.
		 */
		for (guint i = 0; i < entry->methods->len; i++) {
			MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);

			if (mono_conc_hashtable_lookup (log_profiler.method_table, info->method))
				goto free_info; // This method already has metadata emitted.

			/*
			 * Other threads use this hash table to get a general
			 * idea of whether a method has already been emitted to
			 * the stream. Due to the way we add to this table, it
			 * can easily happen that multiple threads queue up the
			 * same methods, but that's OK since eventually all
			 * methods will be in this table and the thread-local
			 * method lists will just be empty for the rest of the
			 * app's lifetime.
			 */
			mono_os_mutex_lock (&log_profiler.method_table_mutex);
			mono_conc_hashtable_insert (log_profiler.method_table, info->method, info->method);
			mono_os_mutex_unlock (&log_profiler.method_table_mutex);

			char *name = mono_method_full_name (info->method, 1);
			int nlen = strlen (name) + 1;
			void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
			int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;

			ENTER_LOG (&method_jits_ctr, logbuffer,
				EVENT_SIZE /* event */ +
				LEB128_SIZE /* method */ +
				LEB128_SIZE /* start */ +
				LEB128_SIZE /* size */ +
				nlen /* name */
			);

			emit_event_time (logbuffer, TYPE_JIT | TYPE_METHOD, info->time);
			emit_method_inner (logbuffer, info->method);
			emit_ptr (logbuffer, cstart);
			emit_value (logbuffer, csize);

			/* The name is written inline, NUL terminator included. */
			memcpy (logbuffer->cursor, name, nlen);
			logbuffer->cursor += nlen;

			EXIT_LOG_EXPLICIT (NO_SEND);

			mono_free (name);

			wrote_methods = TRUE;

		free_info:
			g_free (info);
		}

		g_ptr_array_free (entry->methods, TRUE);

		if (wrote_methods) {
			MonoProfilerThread *thread = get_thread ();

			/* Flush the metadata buffer ahead of the entry's buffer below. */
			dump_buffer_threadless (thread->buffer);
			init_buffer_state (thread);
		}

	no_methods:
		dump_buffer (entry->buffer);

		mono_thread_hazardous_try_free (entry, free_writer_entry);

		return TRUE;
	}

	return FALSE;
}
4080 static void *
4081 writer_thread (void *arg)
4083 mono_threads_attach_tools_thread ();
4084 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");
4086 dump_header ();
4088 MonoProfilerThread *thread = init_thread (FALSE);
4090 while (mono_atomic_load_i32 (&log_profiler.run_writer_thread)) {
4091 mono_os_sem_wait (&log_profiler.writer_queue_sem, MONO_SEM_FLAGS_NONE);
4092 handle_writer_queue_entry ();
4095 /* Drain any remaining entries on shutdown. */
4096 while (handle_writer_queue_entry ());
4098 free_buffer (thread->buffer, thread->buffer->size);
4099 deinit_thread (thread);
4101 mono_thread_info_detach ();
4103 return NULL;
/* Launch the writer thread; failure to start it is fatal. */
static void
start_writer_thread (void)
{
	/* The flag must be set before the thread observes it in its loop. */
	mono_atomic_store_i32 (&log_profiler.run_writer_thread, 1);

	if (!mono_native_thread_create (&log_profiler.writer_thread, writer_thread, NULL)) {
		mono_profiler_printf_err ("Could not start log profiler writer thread");
		exit (1);
	}
}
4117 static void
4118 reuse_sample_hit (gpointer p)
4120 SampleHit *sample = p;
4122 mono_lock_free_queue_node_unpoison (&sample->node);
4123 mono_lock_free_queue_enqueue (&log_profiler.sample_reuse_queue, &sample->node);
4126 static gboolean
4127 handle_dumper_queue_entry (void)
4129 SampleHit *sample;
4131 if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.dumper_queue))) {
4132 for (int i = 0; i < sample->count; ++i) {
4133 MonoMethod *method = sample->frames [i].method;
4134 MonoDomain *domain = sample->frames [i].domain;
4135 void *address = sample->frames [i].base_address;
4137 if (!method) {
4138 g_assert (domain && "What happened to the domain pointer?");
4139 g_assert (address && "What happened to the instruction pointer?");
4141 MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *) address);
4143 if (ji)
4144 sample->frames [i].method = mono_jit_info_get_method (ji);
4148 ENTER_LOG (&sample_hits_ctr, logbuffer,
4149 EVENT_SIZE /* event */ +
4150 LEB128_SIZE /* tid */ +
4151 LEB128_SIZE /* count */ +
4152 1 * (
4153 LEB128_SIZE /* ip */
4155 LEB128_SIZE /* managed count */ +
4156 sample->count * (
4157 LEB128_SIZE /* method */
4161 emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
4162 emit_ptr (logbuffer, (void *) sample->tid);
4163 emit_value (logbuffer, 1);
4165 // TODO: Actual native unwinding.
4166 for (int i = 0; i < 1; ++i) {
4167 emit_ptr (logbuffer, sample->ip);
4168 add_code_pointer ((uintptr_t) sample->ip);
4171 /* new in data version 6 */
4172 emit_uvalue (logbuffer, sample->count);
4174 for (int i = 0; i < sample->count; ++i)
4175 emit_method (logbuffer, sample->frames [i].method);
4177 EXIT_LOG;
4179 mono_thread_hazardous_try_free (sample, reuse_sample_hit);
4181 dump_unmanaged_coderefs ();
4184 return FALSE;
4187 static void *
4188 dumper_thread (void *arg)
4190 mono_threads_attach_tools_thread ();
4191 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");
4193 MonoProfilerThread *thread = init_thread (FALSE);
4195 while (mono_atomic_load_i32 (&log_profiler.run_dumper_thread)) {
4197 * Flush samples every second so it doesn't seem like the profiler is
4198 * not working if the program is mostly idle.
4200 if (mono_os_sem_timedwait (&log_profiler.dumper_queue_sem, 1000, MONO_SEM_FLAGS_NONE) == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT)
4201 send_log_unsafe (FALSE);
4203 handle_dumper_queue_entry ();
4206 /* Drain any remaining entries on shutdown. */
4207 while (handle_dumper_queue_entry ());
4209 send_log_unsafe (FALSE);
4210 deinit_thread (thread);
4212 mono_thread_info_detach ();
4214 return NULL;
/* Launch the dumper thread; failure to start it is fatal. */
static void
start_dumper_thread (void)
{
	/* The flag must be set before the thread observes it in its loop. */
	mono_atomic_store_i32 (&log_profiler.run_dumper_thread, 1);

	if (!mono_native_thread_create (&log_profiler.dumper_thread, dumper_thread, NULL)) {
		mono_profiler_printf_err ("Could not start log profiler dumper thread");
		exit (1);
	}
}
/*
 * Register a profiler-internal statistics counter with the runtime's
 * counters infrastructure (monotonic, profiler category).
 */
static void
register_counter (const char *name, gint32 *counter)
{
	mono_counters_register (name, MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, counter);
}
/* Icall: compile-time upper bound on captured stack trace frames. */
ICALL_EXPORT gint32
proflog_icall_GetMaxStackTraceFrames (void)
{
	return MAX_FRAMES;
}
/* Icall: number of frames currently captured per backtrace. */
ICALL_EXPORT gint32
proflog_icall_GetStackTraceFrames (void)
{
	return log_config.num_frames;
}
/*
 * Icall: sets the number of frames captured per backtrace.
 * NOTE(review): plain store without taking api_mutex, unlike the event
 * toggle icalls -- confirm this is intentionally unlocked.
 */
ICALL_EXPORT void
proflog_icall_SetStackTraceFrames (gint32 value)
{
	log_config.num_frames = value;
}
/* Icall: current heapshot triggering mode. */
ICALL_EXPORT MonoProfilerHeapshotMode
proflog_icall_GetHeapshotMode (void)
{
	return log_config.hs_mode;
}
/*
 * Icall: sets the heapshot triggering mode.
 * NOTE(review): plain store without api_mutex -- confirm intentionally unlocked.
 */
ICALL_EXPORT void
proflog_icall_SetHeapshotMode (MonoProfilerHeapshotMode value)
{
	log_config.hs_mode = value;
}
/* Icall: heapshot frequency in milliseconds (time-based mode). */
ICALL_EXPORT gint32
proflog_icall_GetHeapshotMillisecondsFrequency (void)
{
	return log_config.hs_freq_ms;
}
/*
 * Icall: sets the time-based heapshot frequency (milliseconds).
 * NOTE(review): plain store without api_mutex -- confirm intentionally unlocked.
 */
ICALL_EXPORT void
proflog_icall_SetHeapshotMillisecondsFrequency (gint32 value)
{
	log_config.hs_freq_ms = value;
}
/* Icall: heapshot frequency in GC collections (collection-based mode). */
ICALL_EXPORT gint32
proflog_icall_GetHeapshotCollectionsFrequency (void)
{
	return log_config.hs_freq_gc;
}
/*
 * Icall: sets the collection-based heapshot frequency.
 * NOTE(review): plain store without api_mutex -- confirm intentionally unlocked.
 */
ICALL_EXPORT void
proflog_icall_SetHeapshotCollectionsFrequency (gint32 value)
{
	log_config.hs_freq_gc = value;
}
/* Icall: requests a heapshot; the actual shot happens asynchronously. */
ICALL_EXPORT void
proflog_icall_TriggerHeapshot (void)
{
	trigger_heapshot ();
}
/* Icall: current maximum call depth recorded for enter/leave events. */
ICALL_EXPORT gint32
proflog_icall_GetCallDepth (void)
{
	return log_config.max_call_depth;
}
/*
 * Icall: sets the maximum recorded call depth.
 * NOTE(review): plain store without api_mutex -- confirm intentionally unlocked.
 */
ICALL_EXPORT void
proflog_icall_SetCallDepth (gint32 value)
{
	log_config.max_call_depth = value;
}
4306 ICALL_EXPORT void
4307 proflog_icall_GetSampleMode (MonoProfilerSampleMode *mode, gint32 *frequency)
4309 uint32_t freq;
4311 mono_profiler_get_sample_mode (log_profiler.handle, mode, &freq);
4313 *frequency = freq;
4316 ICALL_EXPORT MonoBoolean
4317 proflog_icall_SetSampleMode (MonoProfilerSampleMode mode, gint32 frequency)
4319 mono_coop_mutex_lock (&log_profiler.api_mutex);
4321 mono_bool result = mono_profiler_set_sample_mode (log_profiler.handle, mode, frequency);
4323 if (mode != MONO_PROFILER_SAMPLE_MODE_NONE) {
4324 ENABLE (PROFLOG_SAMPLE_EVENTS);
4325 mono_profiler_set_sample_hit_callback (log_profiler.handle, mono_sample_hit);
4326 } else {
4327 DISABLE (PROFLOG_SAMPLE_EVENTS);
4328 mono_profiler_set_sample_hit_callback (log_profiler.handle, NULL);
4331 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4333 return result;
4336 ICALL_EXPORT MonoBoolean
4337 proflog_icall_GetExceptionEvents (void)
4339 return ENABLED (PROFLOG_EXCEPTION_EVENTS);
4342 ICALL_EXPORT void
4343 proflog_icall_SetExceptionEvents (MonoBoolean value)
4345 mono_coop_mutex_lock (&log_profiler.api_mutex);
4347 if (value) {
4348 ENABLE (PROFLOG_EXCEPTION_EVENTS);
4349 mono_profiler_set_exception_throw_callback (log_profiler.handle, throw_exc);
4350 mono_profiler_set_exception_clause_callback (log_profiler.handle, clause_exc);
4351 } else {
4352 DISABLE (PROFLOG_EXCEPTION_EVENTS);
4353 mono_profiler_set_exception_throw_callback (log_profiler.handle, NULL);
4354 mono_profiler_set_exception_clause_callback (log_profiler.handle, NULL);
4357 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4360 ICALL_EXPORT MonoBoolean
4361 proflog_icall_GetMonitorEvents (void)
4363 return ENABLED (PROFLOG_MONITOR_EVENTS);
4366 ICALL_EXPORT void
4367 proflog_icall_SetMonitorEvents (MonoBoolean value)
4369 mono_coop_mutex_lock (&log_profiler.api_mutex);
4371 if (value) {
4372 ENABLE (PROFLOG_MONITOR_EVENTS);
4373 mono_profiler_set_monitor_contention_callback (log_profiler.handle, monitor_contention);
4374 mono_profiler_set_monitor_acquired_callback (log_profiler.handle, monitor_acquired);
4375 mono_profiler_set_monitor_failed_callback (log_profiler.handle, monitor_failed);
4376 } else {
4377 DISABLE (PROFLOG_MONITOR_EVENTS);
4378 mono_profiler_set_monitor_contention_callback (log_profiler.handle, NULL);
4379 mono_profiler_set_monitor_acquired_callback (log_profiler.handle, NULL);
4380 mono_profiler_set_monitor_failed_callback (log_profiler.handle, NULL);
4383 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4386 ICALL_EXPORT MonoBoolean
4387 proflog_icall_GetGCEvents (void)
4389 return ENABLED (PROFLOG_GC_EVENTS);
4392 ICALL_EXPORT void
4393 proflog_icall_SetGCEvents (MonoBoolean value)
4395 mono_coop_mutex_lock (&log_profiler.api_mutex);
4397 if (value)
4398 ENABLE (PROFLOG_GC_EVENTS);
4399 else
4400 DISABLE (PROFLOG_GC_EVENTS);
4402 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4405 ICALL_EXPORT MonoBoolean
4406 proflog_icall_GetGCAllocationEvents (void)
4408 return ENABLED (PROFLOG_GC_ALLOCATION_EVENTS);
4411 ICALL_EXPORT void
4412 proflog_icall_SetGCAllocationEvents (MonoBoolean value)
4414 mono_coop_mutex_lock (&log_profiler.api_mutex);
4416 if (value) {
4417 ENABLE (PROFLOG_GC_ALLOCATION_EVENTS);
4418 mono_profiler_set_gc_allocation_callback (log_profiler.handle, gc_alloc);
4419 } else {
4420 DISABLE (PROFLOG_GC_ALLOCATION_EVENTS);
4421 mono_profiler_set_gc_allocation_callback (log_profiler.handle, NULL);
4424 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4427 ICALL_EXPORT MonoBoolean
4428 proflog_icall_GetGCMoveEvents (void)
4430 return ENABLED (PROFLOG_GC_MOVE_EVENTS);
4433 ICALL_EXPORT void
4434 proflog_icall_SetGCMoveEvents (MonoBoolean value)
4436 mono_coop_mutex_lock (&log_profiler.api_mutex);
4438 if (value) {
4439 ENABLE (PROFLOG_GC_MOVE_EVENTS);
4440 mono_profiler_set_gc_moves_callback (log_profiler.handle, gc_moves);
4441 } else {
4442 DISABLE (PROFLOG_GC_MOVE_EVENTS);
4443 mono_profiler_set_gc_moves_callback (log_profiler.handle, NULL);
4446 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4449 ICALL_EXPORT MonoBoolean
4450 proflog_icall_GetGCRootEvents (void)
4452 return ENABLED (PROFLOG_GC_ROOT_EVENTS);
4455 ICALL_EXPORT void
4456 proflog_icall_SetGCRootEvents (MonoBoolean value)
4458 mono_coop_mutex_lock (&log_profiler.api_mutex);
4460 if (value)
4461 ENABLE (PROFLOG_GC_ROOT_EVENTS);
4462 else
4463 DISABLE (PROFLOG_GC_ROOT_EVENTS);
4465 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4468 ICALL_EXPORT MonoBoolean
4469 proflog_icall_GetGCHandleEvents (void)
4471 return ENABLED (PROFLOG_GC_HANDLE_EVENTS);
4474 ICALL_EXPORT void
4475 proflog_icall_SetGCHandleEvents (MonoBoolean value)
4477 mono_coop_mutex_lock (&log_profiler.api_mutex);
4479 if (value) {
4480 ENABLE (PROFLOG_GC_HANDLE_EVENTS);
4481 mono_profiler_set_gc_handle_created_callback (log_profiler.handle, gc_handle_created);
4482 mono_profiler_set_gc_handle_deleted_callback (log_profiler.handle, gc_handle_deleted);
4483 } else {
4484 DISABLE (PROFLOG_GC_HANDLE_EVENTS);
4485 mono_profiler_set_gc_handle_created_callback (log_profiler.handle, NULL);
4486 mono_profiler_set_gc_handle_deleted_callback (log_profiler.handle, NULL);
4489 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4492 ICALL_EXPORT MonoBoolean
4493 proflog_icall_GetGCFinalizationEvents (void)
4495 return ENABLED (PROFLOG_GC_FINALIZATION_EVENTS);
4498 ICALL_EXPORT void
4499 proflog_icall_SetGCFinalizationEvents (MonoBoolean value)
4501 mono_coop_mutex_lock (&log_profiler.api_mutex);
4503 if (value) {
4504 ENABLE (PROFLOG_GC_FINALIZATION_EVENTS);
4505 mono_profiler_set_gc_finalizing_callback (log_profiler.handle, finalize_begin);
4506 mono_profiler_set_gc_finalizing_object_callback (log_profiler.handle, finalize_object_begin);
4507 mono_profiler_set_gc_finalized_object_callback (log_profiler.handle, finalize_object_end);
4508 } else {
4509 DISABLE (PROFLOG_GC_FINALIZATION_EVENTS);
4510 mono_profiler_set_gc_finalizing_callback (log_profiler.handle, NULL);
4511 mono_profiler_set_gc_finalizing_object_callback (log_profiler.handle, NULL);
4512 mono_profiler_set_gc_finalized_object_callback (log_profiler.handle, NULL);
4515 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4518 ICALL_EXPORT MonoBoolean
4519 proflog_icall_GetCounterEvents (void)
4521 return ENABLED (PROFLOG_COUNTER_EVENTS);
4524 ICALL_EXPORT void
4525 proflog_icall_SetCounterEvents (MonoBoolean value)
4527 mono_coop_mutex_lock (&log_profiler.api_mutex);
4529 if (value)
4530 ENABLE (PROFLOG_COUNTER_EVENTS);
4531 else
4532 DISABLE (PROFLOG_COUNTER_EVENTS);
4534 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4537 ICALL_EXPORT MonoBoolean
4538 proflog_icall_GetJitEvents (void)
4540 return ENABLED (PROFLOG_JIT_EVENTS);
4543 ICALL_EXPORT void
4544 proflog_icall_SetJitEvents (MonoBoolean value)
4546 mono_coop_mutex_lock (&log_profiler.api_mutex);
4548 if (value) {
4549 ENABLE (PROFLOG_JIT_EVENTS);
4550 mono_profiler_set_jit_code_buffer_callback (log_profiler.handle, code_buffer_new);
4551 } else {
4552 DISABLE (PROFLOG_JIT_EVENTS);
4553 mono_profiler_set_jit_code_buffer_callback (log_profiler.handle, NULL);
4556 mono_coop_mutex_unlock (&log_profiler.api_mutex);
/*
 * Runtime-initialized callback: registers the profiler's internal statistics
 * counters, initializes counter sampling, starts the worker threads, and
 * registers the managed-facing icalls.
 */
static void
runtime_initialized (MonoProfiler *profiler)
{
	mono_atomic_store_i32 (&log_profiler.runtime_inited, 1);

	register_counter ("Sample events allocated", &sample_allocations_ctr);
	register_counter ("Log buffers allocated", &buffer_allocations_ctr);

	register_counter ("Event: Sync points", &sync_points_ctr);
	register_counter ("Event: Heap objects", &heap_objects_ctr);
	register_counter ("Event: Heap starts", &heap_starts_ctr);
	register_counter ("Event: Heap ends", &heap_ends_ctr);
	register_counter ("Event: Heap roots", &heap_roots_ctr);
	register_counter ("Event: GC events", &gc_events_ctr);
	register_counter ("Event: GC resizes", &gc_resizes_ctr);
	register_counter ("Event: GC allocations", &gc_allocs_ctr);
	register_counter ("Event: GC moves", &gc_moves_ctr);
	register_counter ("Event: GC handle creations", &gc_handle_creations_ctr);
	register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr);
	register_counter ("Event: GC finalize starts", &finalize_begins_ctr);
	register_counter ("Event: GC finalize ends", &finalize_ends_ctr);
	register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr);
	register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr);
	register_counter ("Event: Image loads", &image_loads_ctr);
	register_counter ("Event: Image unloads", &image_unloads_ctr);
	register_counter ("Event: Assembly loads", &assembly_loads_ctr);
	register_counter ("Event: Assembly unloads", &assembly_unloads_ctr);
	register_counter ("Event: Class loads", &class_loads_ctr);
	register_counter ("Event: VTable loads", &vtable_loads_ctr);
	register_counter ("Event: Method entries", &method_entries_ctr);
	register_counter ("Event: Method exits", &method_exits_ctr);
	register_counter ("Event: Method exception leaves", &method_exception_exits_ctr);
	register_counter ("Event: Method JITs", &method_jits_ctr);
	register_counter ("Event: Code buffers", &code_buffers_ctr);
	register_counter ("Event: Exception throws", &exception_throws_ctr);
	register_counter ("Event: Exception clauses", &exception_clauses_ctr);
	register_counter ("Event: Monitor events", &monitor_events_ctr);
	register_counter ("Event: Thread starts", &thread_starts_ctr);
	register_counter ("Event: Thread ends", &thread_ends_ctr);
	register_counter ("Event: Thread names", &thread_names_ctr);
	register_counter ("Event: Domain loads", &domain_loads_ctr);
	register_counter ("Event: Domain unloads", &domain_unloads_ctr);
	register_counter ("Event: Domain names", &domain_names_ctr);
	register_counter ("Event: Context loads", &context_loads_ctr);
	register_counter ("Event: Context unloads", &context_unloads_ctr);
	register_counter ("Event: Sample binaries", &sample_ubins_ctr);
	register_counter ("Event: Sample symbols", &sample_usyms_ctr);
	register_counter ("Event: Sample hits", &sample_hits_ctr);
	register_counter ("Event: Counter descriptors", &counter_descriptors_ctr);
	register_counter ("Event: Counter samples", &counter_samples_ctr);
	register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr);
	register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr);
	register_counter ("Event: Coverage methods", &coverage_methods_ctr);
	register_counter ("Event: Coverage statements", &coverage_statements_ctr);
	register_counter ("Event: Coverage classes", &coverage_classes_ctr);
	register_counter ("Event: Coverage assemblies", &coverage_assemblies_ctr);

	counters_init ();

	/*
	 * We must start the helper thread before the writer thread. This is
	 * because the helper thread sets up the command port which is written to
	 * the log header by the writer thread.
	 */
	start_helper_thread ();
	start_writer_thread ();
	start_dumper_thread ();

	mono_coop_mutex_init (&log_profiler.api_mutex);

	/* Expose the configuration icalls to Mono.Profiler.Log.LogProfiler. */
#define ADD_ICALL(NAME) \
	mono_add_internal_call ("Mono.Profiler.Log.LogProfiler::" EGLIB_STRINGIFY (NAME), proflog_icall_ ## NAME);

	ADD_ICALL (GetMaxStackTraceFrames);
	ADD_ICALL (GetStackTraceFrames);
	ADD_ICALL (SetStackTraceFrames);
	ADD_ICALL (GetHeapshotMode);
	ADD_ICALL (SetHeapshotMode);
	ADD_ICALL (GetHeapshotMillisecondsFrequency);
	ADD_ICALL (SetHeapshotMillisecondsFrequency);
	ADD_ICALL (GetHeapshotCollectionsFrequency);
	ADD_ICALL (SetHeapshotCollectionsFrequency);
	ADD_ICALL (TriggerHeapshot);
	ADD_ICALL (GetCallDepth);
	ADD_ICALL (SetCallDepth);
	ADD_ICALL (GetSampleMode);
	ADD_ICALL (SetSampleMode);
	ADD_ICALL (GetExceptionEvents);
	ADD_ICALL (SetExceptionEvents);
	ADD_ICALL (GetMonitorEvents);
	ADD_ICALL (SetMonitorEvents);
	ADD_ICALL (GetGCEvents);
	ADD_ICALL (SetGCEvents);
	ADD_ICALL (GetGCAllocationEvents);
	ADD_ICALL (SetGCAllocationEvents);
	ADD_ICALL (GetGCMoveEvents);
	ADD_ICALL (SetGCMoveEvents);
	ADD_ICALL (GetGCRootEvents);
	ADD_ICALL (SetGCRootEvents);
	ADD_ICALL (GetGCHandleEvents);
	ADD_ICALL (SetGCHandleEvents);
	ADD_ICALL (GetGCFinalizationEvents);
	ADD_ICALL (SetGCFinalizationEvents);
	ADD_ICALL (GetCounterEvents);
	ADD_ICALL (SetCounterEvents);
	ADD_ICALL (GetJitEvents);
	ADD_ICALL (SetJitEvents);

#undef ADD_ICALL
}
4670 static void
4671 create_profiler (const char *args, const char *filename, GPtrArray *filters)
4673 char *nf;
4675 log_profiler.args = pstrdup (args);
4676 log_profiler.command_port = log_config.command_port;
4678 //If filename begin with +, append the pid at the end
4679 if (filename && *filename == '+')
4680 filename = g_strdup_printf ("%s.%d", filename + 1, getpid ());
4682 if (!filename) {
4683 if (log_config.do_report)
4684 filename = "|mprof-report -";
4685 else
4686 filename = "output.mlpd";
4687 nf = (char*)filename;
4688 } else {
4689 nf = new_filename (filename);
4690 if (log_config.do_report) {
4691 int s = strlen (nf) + 32;
4692 char *p = (char *) g_malloc (s);
4693 snprintf (p, s, "|mprof-report '--out=%s' -", nf);
4694 g_free (nf);
4695 nf = p;
4698 if (*nf == '|') {
4699 log_profiler.file = popen (nf + 1, "w");
4700 log_profiler.pipe_output = 1;
4701 } else if (*nf == '#') {
4702 int fd = strtol (nf + 1, NULL, 10);
4703 log_profiler.file = fdopen (fd, "a");
4704 } else
4705 log_profiler.file = fopen (nf, "wb");
4707 if (!log_profiler.file) {
4708 mono_profiler_printf_err ("Could not create log profiler output file '%s': %s", nf, g_strerror (errno));
4709 exit (1);
4712 #if defined (HAVE_SYS_ZLIB)
4713 if (log_config.use_zip)
4714 log_profiler.gzfile = gzdopen (fileno (log_profiler.file), "wb");
4715 #endif
4717 // FIXME: We should free this stuff too.
4718 mono_lock_free_allocator_init_size_class (&log_profiler.sample_size_class, SAMPLE_SLOT_SIZE (log_config.num_frames), SAMPLE_BLOCK_SIZE);
4719 mono_lock_free_allocator_init_allocator (&log_profiler.sample_allocator, &log_profiler.sample_size_class, MONO_MEM_ACCOUNT_PROFILER);
4721 mono_lock_free_queue_init (&log_profiler.sample_reuse_queue);
4723 // FIXME: We should free this stuff too.
4724 mono_lock_free_allocator_init_size_class (&log_profiler.writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
4725 mono_lock_free_allocator_init_allocator (&log_profiler.writer_entry_allocator, &log_profiler.writer_entry_size_class, MONO_MEM_ACCOUNT_PROFILER);
4727 mono_lock_free_queue_init (&log_profiler.writer_queue);
4728 mono_os_sem_init (&log_profiler.writer_queue_sem, 0);
4730 mono_lock_free_queue_init (&log_profiler.dumper_queue);
4731 mono_os_sem_init (&log_profiler.dumper_queue_sem, 0);
4733 mono_os_mutex_init (&log_profiler.method_table_mutex);
4734 log_profiler.method_table = mono_conc_hashtable_new (NULL, NULL);
4736 if (log_config.collect_coverage)
4737 coverage_init ();
4739 log_profiler.coverage_filters = filters;
4741 log_profiler.startup_time = current_time ();
4744 MONO_API void
4745 mono_profiler_init_log (const char *desc);
4747 void
4748 mono_profiler_init_log (const char *desc)
4751 * If you hit this assert while increasing MAX_FRAMES, you need to increase
4752 * SAMPLE_BLOCK_SIZE as well.
4754 g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));
4755 g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));
4757 GPtrArray *filters = NULL;
4759 proflog_parse_args (&log_config, desc [3] == ':' ? desc + 4 : "");
4761 if (log_config.cov_filter_files) {
4762 filters = g_ptr_array_new ();
4763 int i;
4764 for (i = 0; i < log_config.cov_filter_files->len; ++i) {
4765 const char *name = log_config.cov_filter_files->pdata [i];
4766 parse_cov_filter_file (filters, name);
4770 MonoProfilerHandle handle = log_profiler.handle = mono_profiler_create (&log_profiler);
4772 if (log_config.enter_leave)
4773 mono_profiler_set_call_instrumentation_filter_callback (handle, method_filter);
4776 * If the runtime was invoked for the purpose of AOT compilation only, the
4777 * only thing we want to do is install the call instrumentation filter.
4779 if (mono_jit_aot_compiling ())
4780 goto done;
4782 init_time ();
4784 PROF_TLS_INIT ();
4786 create_profiler (desc, log_config.output_filename, filters);
4788 mono_lls_init (&log_profiler.profiler_thread_list, NULL);
4791 * Required callbacks. These are either necessary for the profiler itself
4792 * to function, or provide metadata that's needed if other events (e.g.
4793 * allocations, exceptions) are dynamically enabled/disabled.
4796 mono_profiler_set_runtime_shutdown_begin_callback (handle, log_early_shutdown);
4797 mono_profiler_set_runtime_shutdown_end_callback (handle, log_shutdown);
4798 mono_profiler_set_runtime_initialized_callback (handle, runtime_initialized);
4800 mono_profiler_set_gc_event2_callback (handle, gc_event);
4802 mono_profiler_set_thread_started_callback (handle, thread_start);
4803 mono_profiler_set_thread_exited_callback (handle, thread_end);
4804 mono_profiler_set_thread_name_callback (handle, thread_name);
4806 mono_profiler_set_domain_loaded_callback (handle, domain_loaded);
4807 mono_profiler_set_domain_unloading_callback (handle, domain_unloaded);
4808 mono_profiler_set_domain_name_callback (handle, domain_name);
4810 mono_profiler_set_context_loaded_callback (handle, context_loaded);
4811 mono_profiler_set_context_unloaded_callback (handle, context_unloaded);
4813 mono_profiler_set_assembly_loaded_callback (handle, assembly_loaded);
4814 mono_profiler_set_assembly_unloading_callback (handle, assembly_unloaded);
4816 mono_profiler_set_image_loaded_callback (handle, image_loaded);
4817 mono_profiler_set_image_unloading_callback (handle, image_unloaded);
4819 mono_profiler_set_class_loaded_callback (handle, class_loaded);
4821 mono_profiler_set_vtable_loaded_callback (handle, vtable_loaded);
4823 mono_profiler_set_jit_done_callback (handle, method_jitted);
4825 mono_profiler_set_gc_root_register_callback (handle, gc_root_register);
4826 mono_profiler_set_gc_root_unregister_callback (handle, gc_root_deregister);
4828 if (ENABLED (PROFLOG_EXCEPTION_EVENTS)) {
4829 mono_profiler_set_exception_throw_callback (handle, throw_exc);
4830 mono_profiler_set_exception_clause_callback (handle, clause_exc);
4833 if (ENABLED (PROFLOG_MONITOR_EVENTS)) {
4834 mono_profiler_set_monitor_contention_callback (handle, monitor_contention);
4835 mono_profiler_set_monitor_acquired_callback (handle, monitor_acquired);
4836 mono_profiler_set_monitor_failed_callback (handle, monitor_failed);
4839 if (ENABLED (PROFLOG_GC_EVENTS))
4840 mono_profiler_set_gc_resize_callback (handle, gc_resize);
4842 if (ENABLED (PROFLOG_GC_ALLOCATION_EVENTS))
4843 mono_profiler_set_gc_allocation_callback (handle, gc_alloc);
4845 if (ENABLED (PROFLOG_GC_MOVE_EVENTS))
4846 mono_profiler_set_gc_moves_callback (handle, gc_moves);
4848 if (ENABLED (PROFLOG_GC_HANDLE_EVENTS)) {
4849 mono_profiler_set_gc_handle_created_callback (handle, gc_handle_created);
4850 mono_profiler_set_gc_handle_deleted_callback (handle, gc_handle_deleted);
4853 if (ENABLED (PROFLOG_GC_FINALIZATION_EVENTS)) {
4854 mono_profiler_set_gc_finalizing_callback (handle, finalize_begin);
4855 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4856 mono_profiler_set_gc_finalizing_object_callback (handle, finalize_object_begin);
4859 //On Demand heapshot uses the finalizer thread to force a collection and thus a heapshot
4860 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4862 if (ENABLED (PROFLOG_SAMPLE_EVENTS))
4863 mono_profiler_set_sample_hit_callback (handle, mono_sample_hit);
4865 if (ENABLED (PROFLOG_JIT_EVENTS))
4866 mono_profiler_set_jit_code_buffer_callback (handle, code_buffer_new);
4868 if (log_config.enter_leave) {
4869 mono_profiler_set_method_enter_callback (handle, method_enter);
4870 mono_profiler_set_method_leave_callback (handle, method_leave);
4871 mono_profiler_set_method_tail_call_callback (handle, tail_call);
4872 mono_profiler_set_method_exception_leave_callback (handle, method_exc_leave);
4875 if (log_config.collect_coverage) {
4876 mono_profiler_enable_coverage ();
4877 mono_profiler_set_coverage_filter_callback (handle, coverage_filter);
4880 mono_profiler_enable_allocations ();
4881 mono_profiler_enable_sampling (handle);
4884 * If no sample option was given by the user, this just leaves the sampling
4885 * thread in idle mode. We do this even if no option was given so that we
4886 * can warn if another profiler controls sampling parameters.
4888 if (!mono_profiler_set_sample_mode (handle, log_config.sampling_mode, log_config.sample_freq))
4889 mono_profiler_printf_err ("Another profiler controls sampling parameters; the log profiler will not be able to modify them.");
4891 done: