/*
 * mono/profiler/log.c
 * (Provenance note: recovered from a git web-viewer export; an unrelated
 * commit title and blob hash header were removed from this spot.)
 */
/*
 * log.c: mono log profiler
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Alex Rønne Petersen (alexrp@xamarin.com)
 *
 * Copyright 2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include <config.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/assembly-internals.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/icall-internals.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/loader-internals.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/mono-config.h>
#include <mono/metadata/mono-gc.h>
#include <mono/metadata/mono-perfcounters.h>
#include <mono/metadata/object-internals.h>
#include <mono/metadata/runtime.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/mini/jit.h>
#include <mono/utils/atomic.h>
#include <mono/utils/hazard-pointer.h>
#include <mono/utils/lock-free-alloc.h>
#include <mono/utils/lock-free-queue.h>
#include <mono/utils/mono-conc-hashtable.h>
#include <mono/utils/mono-coop-mutex.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/utils/mono-linked-list-set.h>
#include <mono/utils/mono-membar.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-os-mutex.h>
#include <mono/utils/mono-os-semaphore.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-threads-api.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/utils/os-event.h>
#include "log.h"

#ifdef HAVE_DLFCN_H
#include <dlfcn.h>
#endif
#include <fcntl.h>
#ifdef HAVE_LINK_H
#include <link.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#if defined(__APPLE__)
#include <mach/mach_time.h>
#endif
#include <netinet/in.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <sys/socket.h>
#if defined (HAVE_SYS_ZLIB)
#include <zlib.h>
#endif
72 // Statistics for internal profiler data structures.
73 static gint32 sample_allocations_ctr,
74 buffer_allocations_ctr;
76 // Statistics for profiler events.
77 static gint32 sync_points_ctr,
78 aot_ids_ctr,
79 heap_objects_ctr,
80 heap_starts_ctr,
81 heap_ends_ctr,
82 heap_roots_ctr,
83 heap_root_registers_ctr,
84 heap_root_unregisters_ctr,
85 gc_events_ctr,
86 gc_resizes_ctr,
87 gc_allocs_ctr,
88 gc_moves_ctr,
89 gc_handle_creations_ctr,
90 gc_handle_deletions_ctr,
91 finalize_begins_ctr,
92 finalize_ends_ctr,
93 finalize_object_begins_ctr,
94 finalize_object_ends_ctr,
95 image_loads_ctr,
96 image_unloads_ctr,
97 assembly_loads_ctr,
98 assembly_unloads_ctr,
99 class_loads_ctr,
100 vtable_loads_ctr,
101 method_entries_ctr,
102 method_exits_ctr,
103 method_exception_exits_ctr,
104 method_jits_ctr,
105 code_buffers_ctr,
106 exception_throws_ctr,
107 exception_clauses_ctr,
108 monitor_events_ctr,
109 thread_starts_ctr,
110 thread_ends_ctr,
111 thread_names_ctr,
112 domain_loads_ctr,
113 domain_unloads_ctr,
114 domain_names_ctr,
115 context_loads_ctr,
116 context_unloads_ctr,
117 sample_ubins_ctr,
118 sample_usyms_ctr,
119 sample_hits_ctr,
120 counter_descriptors_ctr,
121 counter_samples_ctr,
122 perfcounter_descriptors_ctr,
123 perfcounter_samples_ctr;
125 // Pending data to be written to the log, for a single thread.
126 // Threads periodically flush their own LogBuffers by calling safe_send
127 typedef struct _LogBuffer LogBuffer;
128 struct _LogBuffer {
129 // Next (older) LogBuffer in processing queue
130 LogBuffer *next;
132 uint64_t time_base;
133 uint64_t last_time;
134 gboolean has_ptr_base;
135 uintptr_t ptr_base;
136 uintptr_t method_base;
137 uintptr_t last_method;
138 uintptr_t obj_base;
139 uintptr_t thread_id;
141 // Bytes allocated for this LogBuffer
142 int size;
144 // Start of currently unused space in buffer
145 unsigned char* cursor;
147 // Pointer to start-of-structure-plus-size (for convenience)
148 unsigned char* buf_end;
150 // Start of data in buffer. Contents follow "buffer format" described above.
151 unsigned char buf [1];
154 typedef struct {
155 MonoLinkedListSetNode node;
157 // Was this thread added to the LLS?
158 gboolean attached;
160 // Did this thread detach from the runtime? Only used for internal profiler threads.
161 gboolean did_detach;
163 // The current log buffer for this thread.
164 LogBuffer *buffer;
166 // Methods referenced by events in `buffer`, see `MethodInfo`.
167 GPtrArray *methods;
169 // Current call depth for enter/leave events.
170 int call_depth;
172 // Indicates whether this thread is currently writing to its `buffer`.
173 gboolean busy;
175 // Has this thread written a thread end event to `buffer`?
176 gboolean ended;
178 // Stored in `buffer_lock_state` to take the exclusive lock.
179 int small_id;
180 } MonoProfilerThread;
// Default value in `profiler_tls` for new threads.
#define MONO_PROFILER_THREAD_ZERO ((MonoProfilerThread *) NULL)

// This is written to `profiler_tls` to indicate that a thread has stopped.
#define MONO_PROFILER_THREAD_DEAD ((MonoProfilerThread *) -1)

// Do not use these TLS macros directly unless you know what you're doing.
#define PROF_TLS_SET(VAL) mono_thread_info_set_tools_data (VAL)
#define PROF_TLS_GET mono_thread_info_get_tools_data
193 static uintptr_t
194 thread_id (void)
196 return (uintptr_t) mono_native_thread_id_get ();
199 static uintptr_t
200 process_id (void)
202 #ifdef HOST_WIN32
203 return (uintptr_t) GetCurrentProcessId ();
204 #else
205 return (uintptr_t) getpid ();
206 #endif
// Query/set/clear bits in the effective event mask of the profiler config.
#define ENABLED(EVT) (!!(log_config.effective_mask & (EVT)))
#define ENABLE(EVT) do { log_config.effective_mask |= (EVT); } while (0)
#define DISABLE(EVT) do { log_config.effective_mask &= ~(EVT); } while (0)
/*
 * These macros should be used when writing an event to a log buffer. They
 * take care of a bunch of stuff that can be repetitive and error-prone, such
 * as attaching the current thread, acquiring/releasing the buffer lock,
 * incrementing the event counter, expanding the log buffer, etc. They also
 * create a scope so that it's harder to leak the LogBuffer pointer, which can
 * be problematic as the pointer is unstable when the buffer lock isn't
 * acquired.
 *
 * If the calling thread is already attached, these macros will not alter its
 * attach mode (i.e. whether it's added to the LLS). If the thread is not
 * attached, init_thread () will be called with add_to_lls = TRUE.
 */

#define ENTER_LOG(COUNTER, BUFFER, SIZE) \
	do { \
		MonoProfilerThread *thread__ = get_thread (); \
		g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
		thread__->busy = TRUE; \
		mono_atomic_inc_i32 ((COUNTER)); \
		if (thread__->attached) \
			buffer_lock (); \
		LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))

#define EXIT_LOG_EXPLICIT(SEND) \
		if ((SEND)) \
			send_log_unsafe (TRUE); \
		if (thread__->attached) \
			buffer_unlock (); \
		thread__->busy = FALSE; \
	} while (0)

// Pass these to EXIT_LOG_EXPLICIT () for easier reading.
#define DO_SEND TRUE
#define NO_SEND FALSE

#define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND)
// Singly linked list of native binaries seen by the profiler.
typedef struct _BinaryObject BinaryObject;
struct _BinaryObject {
	BinaryObject *next;
	void *addr; // Load address of the binary.
	char *name; // Name/path of the binary.
};
258 typedef struct MonoCounterAgent {
259 MonoCounter *counter;
260 // MonoCounterAgent specific data :
261 void *value;
262 size_t value_size;
263 guint32 index;
264 gboolean emitted;
265 struct MonoCounterAgent *next;
266 } MonoCounterAgent;
268 typedef struct _PerfCounterAgent PerfCounterAgent;
269 struct _PerfCounterAgent {
270 PerfCounterAgent *next;
271 guint32 index;
272 char *category_name;
273 char *name;
274 gint64 value;
275 gboolean emitted;
276 gboolean updated;
277 gboolean deleted;
280 struct _MonoProfiler {
281 MonoProfilerHandle handle;
283 FILE* file;
284 #if defined (HAVE_SYS_ZLIB)
285 gzFile gzfile;
286 #endif
288 char *args;
289 uint64_t startup_time;
290 int timer_overhead;
292 #ifdef __APPLE__
293 mach_timebase_info_data_t timebase_info;
294 #elif defined (HOST_WIN32)
295 LARGE_INTEGER pcounter_freq;
296 #endif
298 int pipe_output;
299 int command_port;
300 int server_socket;
301 int pipes [2];
303 MonoLinkedListSet profiler_thread_list;
304 volatile gint32 buffer_lock_state;
305 volatile gint32 buffer_lock_exclusive_intent;
307 volatile gint32 runtime_inited;
308 volatile gint32 detach_threads;
309 volatile gint32 in_shutdown;
311 MonoSemType attach_threads_sem;
312 MonoSemType detach_threads_sem;
314 MonoNativeThreadId helper_thread;
315 MonoOSEvent helper_thread_exited;
317 MonoNativeThreadId writer_thread;
318 volatile gint32 run_writer_thread;
319 MonoOSEvent writer_thread_exited;
320 MonoLockFreeQueue writer_queue;
321 MonoSemType writer_queue_sem;
323 MonoLockFreeAllocSizeClass writer_entry_size_class;
324 MonoLockFreeAllocator writer_entry_allocator;
326 MonoConcurrentHashTable *method_table;
327 mono_mutex_t method_table_mutex;
329 MonoNativeThreadId dumper_thread;
330 volatile gint32 run_dumper_thread;
331 MonoOSEvent dumper_thread_exited;
332 MonoLockFreeQueue dumper_queue;
333 MonoSemType dumper_queue_sem;
335 MonoLockFreeAllocSizeClass sample_size_class;
336 MonoLockFreeAllocator sample_allocator;
337 MonoLockFreeQueue sample_reuse_queue;
339 BinaryObject *binary_objects;
341 volatile gint32 heapshot_requested;
342 guint64 gc_count;
343 guint64 last_hs_time;
344 gboolean do_heap_walk;
346 mono_mutex_t counters_mutex;
347 MonoCounterAgent *counters;
348 PerfCounterAgent *perfcounters;
349 guint32 counters_index;
351 MonoCoopMutex api_mutex;
354 static ProfilerConfig log_config;
355 static struct _MonoProfiler log_profiler;
357 typedef struct {
358 MonoLockFreeQueueNode node;
359 GPtrArray *methods;
360 LogBuffer *buffer;
361 } WriterQueueEntry;
363 #define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())
365 typedef struct {
366 MonoMethod *method;
367 MonoJitInfo *ji;
368 uint64_t time;
369 } MethodInfo;
371 #define TICKS_PER_SEC 1000000000LL
372 #define TICKS_PER_MSEC (TICKS_PER_SEC / 1000)
374 static uint64_t
375 current_time (void)
377 #ifdef __APPLE__
378 uint64_t time = mach_absolute_time ();
380 time *= log_profiler.timebase_info.numer;
381 time /= log_profiler.timebase_info.denom;
383 return time;
384 #elif defined (HOST_WIN32)
385 LARGE_INTEGER value;
387 QueryPerformanceCounter (&value);
389 return value.QuadPart * TICKS_PER_SEC / log_profiler.pcounter_freq.QuadPart;
390 #elif defined (CLOCK_MONOTONIC)
391 struct timespec tspec;
393 clock_gettime (CLOCK_MONOTONIC, &tspec);
395 return ((uint64_t) tspec.tv_sec * TICKS_PER_SEC + tspec.tv_nsec);
396 #else
397 struct timeval tv;
399 gettimeofday (&tv, NULL);
401 return ((uint64_t) tv.tv_sec * TICKS_PER_SEC + tv.tv_usec * 1000);
402 #endif
405 static void
406 init_time (void)
408 #ifdef __APPLE__
409 mach_timebase_info (&log_profiler.timebase_info);
410 #elif defined (HOST_WIN32)
411 QueryPerformanceFrequency (&log_profiler.pcounter_freq);
412 #endif
414 uint64_t time_start = current_time ();
416 for (int i = 0; i < 256; ++i)
417 current_time ();
419 uint64_t time_end = current_time ();
421 log_profiler.timer_overhead = (time_end - time_start) / 256;
// Duplicates a NUL-terminated string with g_malloc; the caller owns the
// result and frees it with g_free.
static char*
pstrdup (const char *s)
{
	// size_t (not int): strlen returns size_t, so this avoids a theoretical
	// truncation for very large inputs.
	size_t len = strlen (s) + 1;
	char *p = (char *) g_malloc (len);
	memcpy (p, s, len);
	return p;
}
#define BUFFER_SIZE (4096 * 16)

/* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
#define LEB128_SIZE 10

/* Size of a value encoded as a single byte. */
#undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
#define BYTE_SIZE 1

/* Size in bytes of the event prefix (ID + time). */
#define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
445 static void *
446 alloc_buffer (int size)
448 return mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_ANON | MONO_MMAP_PRIVATE, MONO_MEM_ACCOUNT_PROFILER);
451 static void
452 free_buffer (void *buf, int size)
454 mono_vfree (buf, size, MONO_MEM_ACCOUNT_PROFILER);
457 static LogBuffer*
458 create_buffer (uintptr_t tid, int bytes)
460 LogBuffer* buf = (LogBuffer *) alloc_buffer (MAX (BUFFER_SIZE, bytes));
462 mono_atomic_inc_i32 (&buffer_allocations_ctr);
464 buf->size = BUFFER_SIZE;
465 buf->time_base = current_time ();
466 buf->last_time = buf->time_base;
467 buf->buf_end = (unsigned char *) buf + buf->size;
468 buf->cursor = buf->buf;
469 buf->thread_id = tid;
471 return buf;
475 * Must be called with the reader lock held if thread is the current thread, or
476 * the exclusive lock if thread is a different thread. However, if thread is
477 * the current thread, and init_thread () was called with add_to_lls = FALSE,
478 * then no locking is necessary.
480 static void
481 init_buffer_state (MonoProfilerThread *thread)
483 thread->buffer = create_buffer (thread->node.key, 0);
484 thread->methods = NULL;
487 static void
488 clear_hazard_pointers (MonoThreadHazardPointers *hp)
490 mono_hazard_pointer_clear (hp, 0);
491 mono_hazard_pointer_clear (hp, 1);
492 mono_hazard_pointer_clear (hp, 2);
495 static MonoProfilerThread *
496 init_thread (gboolean add_to_lls)
498 MonoProfilerThread *thread = PROF_TLS_GET ();
500 g_assert (thread != MONO_PROFILER_THREAD_DEAD && "Why are we trying to resurrect a stopped thread?");
503 * Sometimes we may try to initialize a thread twice. One example is the
504 * main thread: We initialize it when setting up the profiler, but we will
505 * also get a thread_start () callback for it. Another example is when
506 * attaching new threads to the runtime: We may get a gc_alloc () callback
507 * for that thread's thread object (where we initialize it), soon followed
508 * by a thread_start () callback.
510 * These cases are harmless anyhow. Just return if we've already done the
511 * initialization work.
513 if (thread != MONO_PROFILER_THREAD_ZERO)
514 return thread;
516 thread = g_malloc (sizeof (MonoProfilerThread));
517 thread->node.key = thread_id ();
518 thread->attached = add_to_lls;
519 thread->did_detach = FALSE;
520 thread->call_depth = 0;
521 thread->busy = FALSE;
522 thread->ended = FALSE;
524 init_buffer_state (thread);
526 thread->small_id = mono_thread_info_register_small_id ();
529 * Some internal profiler threads don't need to be cleaned up
530 * by the main thread on shutdown.
532 if (add_to_lls) {
533 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
534 g_assert (mono_lls_insert (&log_profiler.profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
535 clear_hazard_pointers (hp);
538 g_assert (PROF_TLS_SET (thread));
540 return thread;
543 // Only valid if init_thread () was called with add_to_lls = FALSE.
544 static void
545 deinit_thread (MonoProfilerThread *thread)
547 g_assert (!thread->attached && "Why are we manually freeing an attached thread?");
549 g_free (thread);
550 PROF_TLS_SET (MONO_PROFILER_THREAD_DEAD);
553 static MonoProfilerThread *
554 get_thread (void)
556 return init_thread (TRUE);
559 // Only valid if init_thread () was called with add_to_lls = FALSE.
560 static LogBuffer *
561 ensure_logbuf_unsafe (MonoProfilerThread *thread, int bytes)
563 LogBuffer *old = thread->buffer;
565 if (old->cursor + bytes < old->buf_end)
566 return old;
568 LogBuffer *new_ = create_buffer (thread->node.key, bytes);
569 new_->next = old;
570 thread->buffer = new_;
572 return new_;
/*
 * This is a reader/writer spin lock of sorts used to protect log buffers.
 * When a thread modifies its own log buffer, it increments the reader
 * count. When a thread wants to access log buffers of other threads, it
 * takes the exclusive lock.
 *
 * `buffer_lock_state` holds the reader count in its lower 16 bits, and
 * the small ID of the thread currently holding the exclusive (writer)
 * lock in its upper 16 bits. Both can be zero. It's important that the
 * whole lock state is a single word that can be read/written atomically
 * to avoid race conditions where there could end up being readers while
 * the writer lock is held.
 *
 * The lock is writer-biased. When a thread wants to take the exclusive
 * lock, it increments `buffer_lock_exclusive_intent` which will make new
 * readers spin until it's back to zero, then takes the exclusive lock
 * once the reader count has reached zero. After releasing the exclusive
 * lock, it decrements `buffer_lock_exclusive_intent`, which, when it
 * reaches zero again, allows readers to increment the reader count.
 *
 * The writer bias is necessary because we take the exclusive lock in
 * `gc_event ()` during STW. If the writer bias was not there, and a
 * program had a large number of threads, STW-induced pauses could be
 * significantly longer than they have to be. Also, we emit periodic
 * sync points from the helper thread, which requires taking the
 * exclusive lock, and we need those to arrive with a reasonably
 * consistent frequency so that readers don't have to queue up too many
 * events between sync points.
 *
 * The lock does not support recursion.
 */
607 static void
608 buffer_lock (void)
611 * If the thread holding the exclusive lock tries to modify the
612 * reader count, just make it a no-op. This way, we also avoid
613 * invoking the GC safe point macros below, which could break if
614 * done from a thread that is currently the initiator of STW.
616 * In other words, we rely on the fact that the GC thread takes
617 * the exclusive lock in the gc_event () callback when the world
618 * is about to stop.
620 if (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) != get_thread ()->small_id << 16) {
621 MONO_ENTER_GC_SAFE;
623 gint32 old, new_;
625 do {
626 restart:
627 // Hold off if a thread wants to take the exclusive lock.
628 while (mono_atomic_load_i32 (&log_profiler.buffer_lock_exclusive_intent))
629 mono_thread_info_yield ();
631 old = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);
633 // Is a thread holding the exclusive lock?
634 if (old >> 16) {
635 mono_thread_info_yield ();
636 goto restart;
639 new_ = old + 1;
640 } while (mono_atomic_cas_i32 (&log_profiler.buffer_lock_state, new_, old) != old);
642 MONO_EXIT_GC_SAFE;
645 mono_memory_barrier ();
648 static void
649 buffer_unlock (void)
651 mono_memory_barrier ();
653 gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);
655 // See the comment in buffer_lock ().
656 if (state == get_thread ()->small_id << 16)
657 return;
659 g_assert (state && "Why are we decrementing a zero reader count?");
660 g_assert (!(state >> 16) && "Why is the exclusive lock held?");
662 mono_atomic_dec_i32 (&log_profiler.buffer_lock_state);
665 static void
666 buffer_lock_excl (void)
668 gint32 new_ = get_thread ()->small_id << 16;
670 g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) != new_ && "Why are we taking the exclusive lock twice?");
672 mono_atomic_inc_i32 (&log_profiler.buffer_lock_exclusive_intent);
674 MONO_ENTER_GC_SAFE;
676 while (mono_atomic_cas_i32 (&log_profiler.buffer_lock_state, new_, 0))
677 mono_thread_info_yield ();
679 MONO_EXIT_GC_SAFE;
681 mono_memory_barrier ();
684 static void
685 buffer_unlock_excl (void)
687 mono_memory_barrier ();
689 gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);
690 gint32 excl = state >> 16;
692 g_assert (excl && "Why is the exclusive lock not held?");
693 g_assert (excl == get_thread ()->small_id && "Why does another thread hold the exclusive lock?");
694 g_assert (!(state & 0xFFFF) && "Why are there readers when the exclusive lock is held?");
696 mono_atomic_store_i32 (&log_profiler.buffer_lock_state, 0);
697 mono_atomic_dec_i32 (&log_profiler.buffer_lock_exclusive_intent);
700 static void
701 encode_uleb128 (uint64_t value, uint8_t *buf, uint8_t **endbuf)
703 uint8_t *p = buf;
705 do {
706 uint8_t b = value & 0x7f;
707 value >>= 7;
709 if (value != 0) /* more bytes to come */
710 b |= 0x80;
712 *p ++ = b;
713 } while (value);
715 *endbuf = p;
718 static void
719 encode_sleb128 (intptr_t value, uint8_t *buf, uint8_t **endbuf)
721 int more = 1;
722 int negative = (value < 0);
723 unsigned int size = sizeof (intptr_t) * 8;
724 uint8_t byte;
725 uint8_t *p = buf;
727 while (more) {
728 byte = value & 0x7f;
729 value >>= 7;
731 /* the following is unnecessary if the
732 * implementation of >>= uses an arithmetic rather
733 * than logical shift for a signed left operand
735 if (negative)
736 /* sign extend */
737 value |= - ((intptr_t) 1 <<(size - 7));
739 /* sign bit of byte is second high order bit (0x40) */
740 if ((value == 0 && !(byte & 0x40)) ||
741 (value == -1 && (byte & 0x40)))
742 more = 0;
743 else
744 byte |= 0x80;
746 *p ++= byte;
749 *endbuf = p;
752 static void
753 emit_byte (LogBuffer *logbuffer, int value)
755 logbuffer->cursor [0] = value;
756 logbuffer->cursor++;
758 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
761 static void
762 emit_value (LogBuffer *logbuffer, int value)
764 encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
766 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
769 static void
770 emit_time (LogBuffer *logbuffer, uint64_t value)
772 uint64_t tdiff = value - logbuffer->last_time;
773 encode_uleb128 (tdiff, logbuffer->cursor, &logbuffer->cursor);
774 logbuffer->last_time = value;
776 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
779 static void
780 emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
782 emit_byte (logbuffer, event);
783 emit_time (logbuffer, time);
786 static void
787 emit_event (LogBuffer *logbuffer, int event)
789 emit_event_time (logbuffer, event, current_time ());
792 static void
793 emit_svalue (LogBuffer *logbuffer, int64_t value)
795 encode_sleb128 (value, logbuffer->cursor, &logbuffer->cursor);
797 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
800 static void
801 emit_uvalue (LogBuffer *logbuffer, uint64_t value)
803 encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
805 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
808 static void
809 emit_ptr (LogBuffer *logbuffer, const void *ptr)
811 if (!logbuffer->has_ptr_base) {
812 logbuffer->ptr_base = (uintptr_t) ptr;
813 logbuffer->has_ptr_base = TRUE;
816 emit_svalue (logbuffer, (intptr_t) ptr - logbuffer->ptr_base);
818 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
821 static void
822 emit_method_inner (LogBuffer *logbuffer, void *method)
824 if (!logbuffer->method_base) {
825 logbuffer->method_base = (intptr_t) method;
826 logbuffer->last_method = (intptr_t) method;
829 encode_sleb128 ((intptr_t) ((char *) method - (char *) logbuffer->last_method), logbuffer->cursor, &logbuffer->cursor);
830 logbuffer->last_method = (intptr_t) method;
832 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
835 static void
836 inc_method_ref_count (MonoMethod *method)
838 mono_image_addref (mono_class_get_image (mono_method_get_class (method)));
841 static void
842 dec_method_ref_count (MonoMethod *method)
844 mono_image_close (mono_class_get_image (mono_method_get_class (method)));
847 // The reader lock must be held.
848 static void
849 register_method_local (MonoMethod *method, MonoJitInfo *ji)
851 MonoProfilerThread *thread = get_thread ();
853 if (!mono_conc_hashtable_lookup (log_profiler.method_table, method)) {
854 MethodInfo *info = (MethodInfo *) g_malloc (sizeof (MethodInfo));
856 info->method = method;
857 info->ji = ji;
858 info->time = current_time ();
860 GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());
862 g_ptr_array_add (arr, info);
863 inc_method_ref_count (method);
867 static void
868 emit_method (LogBuffer *logbuffer, MonoMethod *method)
870 register_method_local (method, NULL);
871 emit_method_inner (logbuffer, method);
874 static void
875 emit_obj (LogBuffer *logbuffer, void *ptr)
877 if (!logbuffer->obj_base)
878 logbuffer->obj_base = (uintptr_t) ptr >> 3;
880 emit_svalue (logbuffer, ((uintptr_t) ptr >> 3) - logbuffer->obj_base);
882 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
885 static void
886 emit_string (LogBuffer *logbuffer, const char *str, size_t size)
888 size_t i = 0;
889 if (str) {
890 for (; i < size; i++) {
891 if (str[i] == '\0')
892 break;
893 emit_byte (logbuffer, str [i]);
896 emit_byte (logbuffer, '\0');
899 static void
900 emit_double (LogBuffer *logbuffer, double value)
902 int i;
903 unsigned char buffer[8];
904 memcpy (buffer, &value, 8);
905 #if G_BYTE_ORDER == G_BIG_ENDIAN
906 for (i = 7; i >= 0; i--)
907 #else
908 for (i = 0; i < 8; i++)
909 #endif
910 emit_byte (logbuffer, buffer[i]);
913 static char*
914 write_int16 (char *buf, int32_t value)
916 int i;
917 for (i = 0; i < 2; ++i) {
918 buf [i] = value;
919 value >>= 8;
921 return buf + 2;
924 static char*
925 write_int32 (char *buf, int32_t value)
927 int i;
928 for (i = 0; i < 4; ++i) {
929 buf [i] = value;
930 value >>= 8;
932 return buf + 4;
935 static char*
936 write_int64 (char *buf, int64_t value)
938 int i;
939 for (i = 0; i < 8; ++i) {
940 buf [i] = value;
941 value >>= 8;
943 return buf + 8;
946 static char *
947 write_header_string (char *p, const char *str)
949 size_t len = strlen (str) + 1;
951 p = write_int32 (p, len);
952 strcpy (p, str);
954 return p + len;
957 static void
958 dump_header (void)
960 const char *args = log_profiler.args;
961 const char *arch = mono_config_get_cpu ();
962 const char *os = mono_config_get_os ();
964 char *hbuf = g_malloc (
965 sizeof (gint32) /* header id */ +
966 sizeof (gint8) /* major version */ +
967 sizeof (gint8) /* minor version */ +
968 sizeof (gint8) /* data version */ +
969 sizeof (gint8) /* word size */ +
970 sizeof (gint64) /* startup time */ +
971 sizeof (gint64) /* startup time (nanoseconds) */ +
972 sizeof (gint32) /* timer overhead */ +
973 sizeof (gint32) /* flags */ +
974 sizeof (gint32) /* process id */ +
975 sizeof (gint16) /* command port */ +
976 sizeof (gint32) + strlen (args) + 1 /* arguments */ +
977 sizeof (gint32) + strlen (arch) + 1 /* architecture */ +
978 sizeof (gint32) + strlen (os) + 1 /* operating system */
980 char *p = hbuf;
982 p = write_int32 (p, LOG_HEADER_ID);
983 *p++ = LOG_VERSION_MAJOR;
984 *p++ = LOG_VERSION_MINOR;
985 *p++ = LOG_DATA_VERSION;
986 *p++ = sizeof (void *);
987 p = write_int64 (p, ((uint64_t) time (NULL)) * 1000);
988 p = write_int64 (p, current_time ());
989 p = write_int32 (p, log_profiler.timer_overhead);
990 p = write_int32 (p, 0); /* flags */
991 p = write_int32 (p, process_id ());
992 p = write_int16 (p, log_profiler.command_port);
993 p = write_header_string (p, args);
994 p = write_header_string (p, arch);
995 p = write_header_string (p, os);
997 #if defined (HAVE_SYS_ZLIB)
998 if (log_profiler.gzfile) {
999 gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
1000 } else
1001 #endif
1003 fwrite (hbuf, p - hbuf, 1, log_profiler.file);
1004 fflush (log_profiler.file);
1007 g_free (hbuf);
1011 * Must be called with the reader lock held if thread is the current thread, or
1012 * the exclusive lock if thread is a different thread. However, if thread is
1013 * the current thread, and init_thread () was called with add_to_lls = FALSE,
1014 * then no locking is necessary.
1016 static void
1017 send_buffer (MonoProfilerThread *thread)
1019 WriterQueueEntry *entry = mono_lock_free_alloc (&log_profiler.writer_entry_allocator);
1020 entry->methods = thread->methods;
1021 entry->buffer = thread->buffer;
1023 mono_lock_free_queue_node_init (&entry->node, FALSE);
1025 mono_lock_free_queue_enqueue (&log_profiler.writer_queue, &entry->node);
1026 mono_os_sem_post (&log_profiler.writer_queue_sem);
1029 static void
1030 free_thread (gpointer p)
1032 MonoProfilerThread *thread = p;
1034 if (!thread->ended) {
1036 * The thread is being cleaned up by the main thread during
1037 * shutdown. This typically happens for internal runtime
1038 * threads. We need to synthesize a thread end event.
1041 mono_atomic_inc_i32 (&thread_ends_ctr);
1043 LogBuffer *buf = ensure_logbuf_unsafe (thread,
1044 EVENT_SIZE /* event */ +
1045 BYTE_SIZE /* type */ +
1046 LEB128_SIZE /* tid */
1049 emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
1050 emit_byte (buf, TYPE_THREAD);
1051 emit_ptr (buf, (void *) thread->node.key);
1054 send_buffer (thread);
1056 g_free (thread);
1059 static void
1060 remove_thread (MonoProfilerThread *thread)
1062 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
1064 if (mono_lls_remove (&log_profiler.profiler_thread_list, hp, &thread->node))
1065 mono_thread_hazardous_try_free (thread, free_thread);
1067 clear_hazard_pointers (hp);
1070 static void
1071 dump_buffer (LogBuffer *buf)
1073 char hbuf [128];
1074 char *p = hbuf;
1076 if (buf->next)
1077 dump_buffer (buf->next);
1079 if (buf->cursor - buf->buf) {
1080 p = write_int32 (p, BUF_ID);
1081 p = write_int32 (p, buf->cursor - buf->buf);
1082 p = write_int64 (p, buf->time_base);
1083 p = write_int64 (p, buf->ptr_base);
1084 p = write_int64 (p, buf->obj_base);
1085 p = write_int64 (p, buf->thread_id);
1086 p = write_int64 (p, buf->method_base);
1088 #if defined (HAVE_SYS_ZLIB)
1089 if (log_profiler.gzfile) {
1090 gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
1091 gzwrite (log_profiler.gzfile, buf->buf, buf->cursor - buf->buf);
1092 } else
1093 #endif
1095 fwrite (hbuf, p - hbuf, 1, log_profiler.file);
1096 fwrite (buf->buf, buf->cursor - buf->buf, 1, log_profiler.file);
1097 fflush (log_profiler.file);
1101 free_buffer (buf, buf->size);
1104 // Only valid if init_thread () was called with add_to_lls = FALSE.
1105 static void
1106 send_log_unsafe (gboolean if_needed)
1108 MonoProfilerThread *thread = get_thread ();
1110 if (!if_needed || (if_needed && thread->buffer->next)) {
1111 send_buffer (thread);
1112 init_buffer_state (thread);
1116 static void
1117 dump_aot_id (void)
1119 const char *aotid = mono_runtime_get_aotid ();
1121 if (!aotid)
1122 return;
1124 int alen = strlen (aotid) + 1;
1126 ENTER_LOG (&aot_ids_ctr, logbuffer,
1127 EVENT_SIZE /* event */ +
1128 alen /* aot id */
1131 emit_event (logbuffer, TYPE_META | TYPE_AOT_ID);
1132 memcpy (logbuffer->cursor, aotid, alen);
1133 logbuffer->cursor += alen;
1135 EXIT_LOG;
1138 // Assumes that the exclusive lock is held.
1139 static void
1140 sync_point_flush (void)
1142 g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) == get_thread ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1144 MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
1145 g_assert (thread->attached && "Why is a thread in the LLS not attached?");
1147 send_buffer (thread);
1148 init_buffer_state (thread);
1149 } MONO_LLS_FOREACH_SAFE_END
1152 // Assumes that the exclusive lock is held.
1153 static void
1154 sync_point_mark (MonoProfilerSyncPointType type)
1156 g_assert (mono_atomic_load_i32 (&log_profiler.buffer_lock_state) == get_thread ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1158 ENTER_LOG (&sync_points_ctr, logbuffer,
1159 EVENT_SIZE /* event */ +
1160 BYTE_SIZE /* type */
1163 emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
1164 emit_byte (logbuffer, type);
1166 EXIT_LOG_EXPLICIT (NO_SEND);
1168 send_log_unsafe (FALSE);
1171 // Assumes that the exclusive lock is held.
1172 static void
1173 sync_point (MonoProfilerSyncPointType type)
1175 sync_point_flush ();
1176 sync_point_mark (type);
1179 static int
1180 gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
1182 /* account for object alignment in the heap */
1183 size += 7;
1184 size &= ~7;
1186 ENTER_LOG (&heap_objects_ctr, logbuffer,
1187 EVENT_SIZE /* event */ +
1188 LEB128_SIZE /* obj */ +
1189 LEB128_SIZE /* vtable */ +
1190 LEB128_SIZE /* size */ +
1191 BYTE_SIZE /* generation */ +
1192 LEB128_SIZE /* num */ +
1193 num * (
1194 LEB128_SIZE /* offset */ +
1195 LEB128_SIZE /* ref */
1199 emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
1200 emit_obj (logbuffer, obj);
1201 emit_ptr (logbuffer, mono_object_get_vtable_internal (obj));
1202 emit_value (logbuffer, size);
1203 emit_byte (logbuffer, mono_gc_get_generation (obj));
1204 emit_value (logbuffer, num);
1206 uintptr_t last_offset = 0;
1208 for (int i = 0; i < num; ++i) {
1209 emit_value (logbuffer, offsets [i] - last_offset);
1210 last_offset = offsets [i];
1211 emit_obj (logbuffer, refs [i]);
1214 EXIT_LOG;
1216 return 0;
1219 static void
1220 gc_roots (MonoProfiler *prof, uint64_t num, const mono_byte *const *addresses, MonoObject *const *objects)
1222 ENTER_LOG (&heap_roots_ctr, logbuffer,
1223 EVENT_SIZE /* event */ +
1224 LEB128_SIZE /* num */ +
1225 num * (
1226 LEB128_SIZE /* address */ +
1227 LEB128_SIZE /* object */
1231 emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
1232 emit_value (logbuffer, num);
1234 for (int i = 0; i < num; ++i) {
1235 emit_ptr (logbuffer, addresses [i]);
1236 emit_obj (logbuffer, objects [i]);
1239 EXIT_LOG;
1242 static void
1243 gc_root_register (MonoProfiler *prof, const mono_byte *start, size_t size, MonoGCRootSource source, const void *key, const char *name)
1245 // We don't write raw domain/context pointers in metadata events.
1246 switch (source) {
1247 case MONO_ROOT_SOURCE_DOMAIN:
1248 if (key)
1249 key = (void *)(uintptr_t) mono_domain_get_id ((MonoDomain *) key);
1250 break;
1251 case MONO_ROOT_SOURCE_CONTEXT_STATIC:
1252 key = (void *)(uintptr_t) mono_context_get_id ((MonoAppContext *) key);
1253 break;
1254 default:
1255 break;
1258 int name_len = name ? strlen (name) + 1 : 0;
1260 ENTER_LOG (&heap_root_registers_ctr, logbuffer,
1261 EVENT_SIZE /* event */ +
1262 LEB128_SIZE /* start */ +
1263 LEB128_SIZE /* size */ +
1264 BYTE_SIZE /* source */ +
1265 LEB128_SIZE /* key */ +
1266 name_len /* name */
1269 emit_event (logbuffer, TYPE_HEAP_ROOT_REGISTER | TYPE_HEAP);
1270 emit_ptr (logbuffer, start);
1271 emit_uvalue (logbuffer, size);
1272 emit_byte (logbuffer, source);
1273 emit_ptr (logbuffer, key);
1274 emit_string (logbuffer, name, name_len);
1276 EXIT_LOG;
1279 static void
1280 gc_root_deregister (MonoProfiler *prof, const mono_byte *start)
1282 ENTER_LOG (&heap_root_unregisters_ctr, logbuffer,
1283 EVENT_SIZE /* event */ +
1284 LEB128_SIZE /* start */
1287 emit_event (logbuffer, TYPE_HEAP_ROOT_UNREGISTER | TYPE_HEAP);
1288 emit_ptr (logbuffer, start);
1290 EXIT_LOG;
1293 static void
1294 trigger_heapshot (void)
1296 // Rely on the finalization callback triggering a GC.
1297 mono_atomic_store_i32 (&log_profiler.heapshot_requested, 1);
1298 mono_gc_finalize_notify ();
1301 static void
1302 process_heapshot (void)
1304 if (mono_atomic_load_i32 (&log_profiler.heapshot_requested))
1305 mono_gc_collect (mono_gc_max_generation ());
1308 #define ALL_GC_EVENTS_MASK (PROFLOG_GC_EVENTS | PROFLOG_GC_MOVE_EVENTS | PROFLOG_GC_ROOT_EVENTS)
1310 static void
1311 gc_event (MonoProfiler *profiler, MonoProfilerGCEvent ev, uint32_t generation, gboolean is_serial)
1313 gboolean is_major = generation == mono_gc_max_generation ();
1315 if (ENABLED (PROFLOG_GC_EVENTS)) {
1316 ENTER_LOG (&gc_events_ctr, logbuffer,
1317 EVENT_SIZE /* event */ +
1318 BYTE_SIZE /* gc event */ +
1319 BYTE_SIZE /* generation */
1322 emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
1323 emit_byte (logbuffer, ev);
1324 emit_byte (logbuffer, generation);
1326 EXIT_LOG;
1329 switch (ev) {
1330 case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
1331 switch (log_config.hs_mode) {
1332 case MONO_PROFILER_HEAPSHOT_NONE:
1333 log_profiler.do_heap_walk = FALSE;
1334 break;
1335 case MONO_PROFILER_HEAPSHOT_MAJOR:
1336 log_profiler.do_heap_walk = is_major;
1337 break;
1338 case MONO_PROFILER_HEAPSHOT_ON_DEMAND:
1339 // Handled below.
1340 break;
1341 case MONO_PROFILER_HEAPSHOT_X_GC:
1342 log_profiler.do_heap_walk = !(log_profiler.gc_count % log_config.hs_freq_gc);
1343 break;
1344 case MONO_PROFILER_HEAPSHOT_X_MS:
1345 log_profiler.do_heap_walk = (current_time () - log_profiler.last_hs_time) / TICKS_PER_MSEC >= log_config.hs_freq_ms;
1346 break;
1347 default:
1348 g_assert_not_reached ();
1352 * heapshot_requested is set either because a heapshot was triggered
1353 * manually (through the API or command server) or because we're doing
1354 * a shutdown heapshot. Either way, a manually triggered heapshot
1355 * overrides any decision we made in the switch above.
1357 if (is_major && is_serial && mono_atomic_load_i32 (&log_profiler.heapshot_requested)) {
1358 log_profiler.do_heap_walk = TRUE;
1359 } else if (log_profiler.do_heap_walk && (!is_major || !is_serial)) {
1360 /* Do a heap walk later, when we get invoked from the finalizer in serial mode */
1361 trigger_heapshot ();
1362 log_profiler.do_heap_walk = FALSE;
1365 if (ENABLED (PROFLOG_GC_ROOT_EVENTS) &&
1366 (log_config.always_do_root_report || log_profiler.do_heap_walk))
1367 mono_profiler_set_gc_roots_callback (log_profiler.handle, gc_roots);
1370 * Ensure that no thread can be in the middle of writing to
1371 * a buffer when the world stops...
1373 buffer_lock_excl ();
1375 break;
1376 case MONO_GC_EVENT_POST_STOP_WORLD:
1378 * ... So that we now have a consistent view of all buffers.
1379 * This allows us to flush them. We need to do this because
1380 * they may contain object allocation events that need to be
1381 * committed to the log file before any object move events
1382 * that will be produced during this GC.
1384 if (ENABLED (ALL_GC_EVENTS_MASK))
1385 sync_point (SYNC_POINT_WORLD_STOP);
1387 // Surround heapshots with HEAP_START/HEAP_END events.
1388 if (log_profiler.do_heap_walk) {
1389 ENTER_LOG (&heap_starts_ctr, logbuffer,
1390 EVENT_SIZE /* event */
1393 emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
1395 EXIT_LOG;
1398 break;
1399 case MONO_GC_EVENT_START:
1400 if (is_major)
1401 log_profiler.gc_count++;
1403 break;
1404 case MONO_GC_EVENT_PRE_START_WORLD:
1405 mono_profiler_set_gc_roots_callback (log_profiler.handle, NULL);
1407 if (log_profiler.do_heap_walk) {
1408 g_assert (is_major && is_serial);
1409 mono_gc_walk_heap (0, gc_reference, NULL);
1411 ENTER_LOG (&heap_ends_ctr, logbuffer,
1412 EVENT_SIZE /* event */
1415 emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
1417 EXIT_LOG;
1419 log_profiler.do_heap_walk = FALSE;
1420 log_profiler.last_hs_time = current_time ();
1422 mono_atomic_store_i32 (&log_profiler.heapshot_requested, 0);
1426 * Similarly, we must now make sure that any object moves
1427 * written to the GC thread's buffer are flushed. Otherwise,
1428 * object allocation events for certain addresses could come
1429 * after the move events that made those addresses available.
1431 if (ENABLED (ALL_GC_EVENTS_MASK))
1432 sync_point_mark (SYNC_POINT_WORLD_START);
1434 break;
1435 case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
1437 * Finally, it is safe to allow other threads to write to
1438 * their buffers again.
1440 buffer_unlock_excl ();
1442 break;
1443 default:
1444 break;
1448 static void
1449 gc_resize (MonoProfiler *profiler, uintptr_t new_size)
1451 ENTER_LOG (&gc_resizes_ctr, logbuffer,
1452 EVENT_SIZE /* event */ +
1453 LEB128_SIZE /* new size */
1456 emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
1457 emit_uvalue (logbuffer, new_size);
1459 EXIT_LOG;
1462 typedef struct {
1463 int count;
1464 MonoMethod* methods [MAX_FRAMES];
1465 int32_t il_offsets [MAX_FRAMES];
1466 int32_t native_offsets [MAX_FRAMES];
1467 } FrameData;
1469 static mono_bool
1470 walk_stack (MonoMethod *method, int32_t native_offset, int32_t il_offset, mono_bool managed, void* data)
1472 FrameData *frame = (FrameData *)data;
1473 if (method && frame->count < log_config.num_frames) {
1474 frame->il_offsets [frame->count] = il_offset;
1475 frame->native_offsets [frame->count] = native_offset;
1476 frame->methods [frame->count++] = method;
1478 return frame->count == log_config.num_frames;
1482 * a note about stack walks: they can cause more profiler events to fire,
1483 * so we need to make sure they don't happen after we started emitting an
1484 * event, hence the collect_bt/emit_bt split.
1486 static void
1487 collect_bt (FrameData *data)
1489 data->count = 0;
1490 mono_stack_walk_no_il (walk_stack, data);
1493 static void
1494 emit_bt (LogBuffer *logbuffer, FrameData *data)
1496 emit_value (logbuffer, data->count);
1498 while (data->count)
1499 emit_method (logbuffer, data->methods [--data->count]);
1502 static void
1503 gc_alloc (MonoProfiler *prof, MonoObject *obj)
1505 int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_ALLOC_BT : 0;
1506 FrameData data;
1507 uintptr_t len = mono_object_get_size_internal (obj);
1508 /* account for object alignment in the heap */
1509 len += 7;
1510 len &= ~7;
1512 if (do_bt)
1513 collect_bt (&data);
1515 ENTER_LOG (&gc_allocs_ctr, logbuffer,
1516 EVENT_SIZE /* event */ +
1517 LEB128_SIZE /* vtable */ +
1518 LEB128_SIZE /* obj */ +
1519 LEB128_SIZE /* size */ +
1520 (do_bt ? (
1521 LEB128_SIZE /* count */ +
1522 data.count * (
1523 LEB128_SIZE /* method */
1525 ) : 0)
1528 emit_event (logbuffer, do_bt | TYPE_ALLOC);
1529 emit_ptr (logbuffer, mono_object_get_vtable_internal (obj));
1530 emit_obj (logbuffer, obj);
1531 emit_value (logbuffer, len);
1533 if (do_bt)
1534 emit_bt (logbuffer, &data);
1536 EXIT_LOG;
1539 static void
1540 gc_moves (MonoProfiler *prof, MonoObject *const *objects, uint64_t num)
1542 ENTER_LOG (&gc_moves_ctr, logbuffer,
1543 EVENT_SIZE /* event */ +
1544 LEB128_SIZE /* num */ +
1545 num * (
1546 LEB128_SIZE /* object */
1550 emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
1551 emit_value (logbuffer, num);
1553 for (int i = 0; i < num; ++i)
1554 emit_obj (logbuffer, objects [i]);
1556 EXIT_LOG;
1559 static void
1560 gc_handle (MonoProfiler *prof, int op, MonoGCHandleType type, uint32_t handle, MonoObject *obj)
1562 int do_bt = !log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames;
1563 FrameData data;
1565 if (do_bt)
1566 collect_bt (&data);
1568 gint32 *ctr = op == MONO_PROFILER_GC_HANDLE_CREATED ? &gc_handle_creations_ctr : &gc_handle_deletions_ctr;
1570 ENTER_LOG (ctr, logbuffer,
1571 EVENT_SIZE /* event */ +
1572 LEB128_SIZE /* type */ +
1573 LEB128_SIZE /* handle */ +
1574 (op == MONO_PROFILER_GC_HANDLE_CREATED ? (
1575 LEB128_SIZE /* obj */
1576 ) : 0) +
1577 (do_bt ? (
1578 LEB128_SIZE /* count */ +
1579 data.count * (
1580 LEB128_SIZE /* method */
1582 ) : 0)
1585 if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1586 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
1587 else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
1588 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
1589 else
1590 g_assert_not_reached ();
1592 emit_value (logbuffer, type);
1593 emit_value (logbuffer, handle);
1595 if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1596 emit_obj (logbuffer, obj);
1598 if (do_bt)
1599 emit_bt (logbuffer, &data);
1601 EXIT_LOG;
1604 static void
1605 gc_handle_created (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type, MonoObject *obj)
1607 gc_handle (prof, MONO_PROFILER_GC_HANDLE_CREATED, type, handle, obj);
1610 static void
1611 gc_handle_deleted (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type)
1613 gc_handle (prof, MONO_PROFILER_GC_HANDLE_DESTROYED, type, handle, NULL);
1616 static void
1617 finalize_begin (MonoProfiler *prof)
1619 ENTER_LOG (&finalize_begins_ctr, buf,
1620 EVENT_SIZE /* event */
1623 emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);
1625 EXIT_LOG;
1628 static void
1629 finalize_end (MonoProfiler *prof)
1631 process_heapshot ();
1633 if (ENABLED (PROFLOG_GC_FINALIZATION_EVENTS)) {
1634 ENTER_LOG (&finalize_ends_ctr, buf,
1635 EVENT_SIZE /* event */
1638 emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);
1640 EXIT_LOG;
1644 static void
1645 finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
1647 ENTER_LOG (&finalize_object_begins_ctr, buf,
1648 EVENT_SIZE /* event */ +
1649 LEB128_SIZE /* obj */
1652 emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
1653 emit_obj (buf, obj);
1655 EXIT_LOG;
1658 static void
1659 finalize_object_end (MonoProfiler *prof, MonoObject *obj)
1661 ENTER_LOG (&finalize_object_ends_ctr, buf,
1662 EVENT_SIZE /* event */ +
1663 LEB128_SIZE /* obj */
1666 emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
1667 emit_obj (buf, obj);
1669 EXIT_LOG;
1672 static char*
1673 push_nesting (char *p, MonoClass *klass)
1675 MonoClass *nesting;
1676 const char *name;
1677 const char *nspace;
1678 nesting = mono_class_get_nesting_type (klass);
1679 if (nesting) {
1680 p = push_nesting (p, nesting);
1681 *p++ = '/';
1682 *p = 0;
1684 name = m_class_get_name (klass);
1685 nspace = m_class_get_name_space (klass);
1686 if (*nspace) {
1687 strcpy (p, nspace);
1688 p += strlen (nspace);
1689 *p++ = '.';
1690 *p = 0;
1692 strcpy (p, name);
1693 p += strlen (name);
1694 return p;
1697 static char*
1698 type_name (MonoClass *klass)
1700 char buf [1024];
1701 char *p;
1702 push_nesting (buf, klass);
1703 p = (char *) g_malloc (strlen (buf) + 1);
1704 strcpy (p, buf);
1705 return p;
1708 static void
1709 image_loaded (MonoProfiler *prof, MonoImage *image)
1711 const char *name = mono_image_get_filename (image);
1712 int nlen = strlen (name) + 1;
1713 const char *guid = mono_image_get_guid (image);
1715 // Dynamic images don't have a GUID set.
1716 if (!guid)
1717 guid = "";
1719 int glen = strlen (guid) + 1;
1721 ENTER_LOG (&image_loads_ctr, logbuffer,
1722 EVENT_SIZE /* event */ +
1723 BYTE_SIZE /* type */ +
1724 LEB128_SIZE /* image */ +
1725 nlen /* name */ +
1726 glen /* guid */
1729 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1730 emit_byte (logbuffer, TYPE_IMAGE);
1731 emit_ptr (logbuffer, image);
1732 memcpy (logbuffer->cursor, name, nlen);
1733 logbuffer->cursor += nlen;
1734 memcpy (logbuffer->cursor, guid, glen);
1735 logbuffer->cursor += glen;
1737 EXIT_LOG;
1740 static void
1741 image_unloaded (MonoProfiler *prof, MonoImage *image)
1743 const char *name = mono_image_get_filename (image);
1744 int nlen = strlen (name) + 1;
1746 ENTER_LOG (&image_unloads_ctr, logbuffer,
1747 EVENT_SIZE /* event */ +
1748 BYTE_SIZE /* type */ +
1749 LEB128_SIZE /* image */ +
1750 nlen /* name */
1753 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1754 emit_byte (logbuffer, TYPE_IMAGE);
1755 emit_ptr (logbuffer, image);
1756 memcpy (logbuffer->cursor, name, nlen);
1757 logbuffer->cursor += nlen;
1759 EXIT_LOG;
1762 static void
1763 assembly_loaded (MonoProfiler *prof, MonoAssembly *assembly)
1765 char *name = mono_stringify_assembly_name (mono_assembly_get_name_internal (assembly));
1766 int nlen = strlen (name) + 1;
1767 MonoImage *image = mono_assembly_get_image_internal (assembly);
1769 ENTER_LOG (&assembly_loads_ctr, logbuffer,
1770 EVENT_SIZE /* event */ +
1771 BYTE_SIZE /* type */ +
1772 LEB128_SIZE /* assembly */ +
1773 LEB128_SIZE /* image */ +
1774 nlen /* name */
1777 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1778 emit_byte (logbuffer, TYPE_ASSEMBLY);
1779 emit_ptr (logbuffer, assembly);
1780 emit_ptr (logbuffer, image);
1781 memcpy (logbuffer->cursor, name, nlen);
1782 logbuffer->cursor += nlen;
1784 EXIT_LOG;
1786 mono_free (name);
1789 static void
1790 assembly_unloaded (MonoProfiler *prof, MonoAssembly *assembly)
1792 char *name = mono_stringify_assembly_name (mono_assembly_get_name_internal (assembly));
1793 int nlen = strlen (name) + 1;
1794 MonoImage *image = mono_assembly_get_image_internal (assembly);
1796 ENTER_LOG (&assembly_unloads_ctr, logbuffer,
1797 EVENT_SIZE /* event */ +
1798 BYTE_SIZE /* type */ +
1799 LEB128_SIZE /* assembly */ +
1800 LEB128_SIZE /* image */ +
1801 nlen /* name */
1804 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1805 emit_byte (logbuffer, TYPE_ASSEMBLY);
1806 emit_ptr (logbuffer, assembly);
1807 emit_ptr (logbuffer, image);
1808 memcpy (logbuffer->cursor, name, nlen);
1809 logbuffer->cursor += nlen;
1811 EXIT_LOG;
1813 mono_free (name);
1816 static void
1817 class_loaded (MonoProfiler *prof, MonoClass *klass)
1819 char *name;
1821 if (mono_atomic_load_i32 (&log_profiler.runtime_inited))
1822 name = mono_type_get_name (m_class_get_byval_arg (klass));
1823 else
1824 name = type_name (klass);
1826 int nlen = strlen (name) + 1;
1827 MonoImage *image = mono_class_get_image (klass);
1829 ENTER_LOG (&class_loads_ctr, logbuffer,
1830 EVENT_SIZE /* event */ +
1831 BYTE_SIZE /* type */ +
1832 LEB128_SIZE /* klass */ +
1833 LEB128_SIZE /* image */ +
1834 nlen /* name */
1837 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1838 emit_byte (logbuffer, TYPE_CLASS);
1839 emit_ptr (logbuffer, klass);
1840 emit_ptr (logbuffer, image);
1841 memcpy (logbuffer->cursor, name, nlen);
1842 logbuffer->cursor += nlen;
1844 EXIT_LOG;
1846 if (mono_atomic_load_i32 (&log_profiler.runtime_inited))
1847 mono_free (name);
1848 else
1849 g_free (name);
1852 static void
1853 vtable_loaded (MonoProfiler *prof, MonoVTable *vtable)
1855 MonoClass *klass = mono_vtable_class_internal (vtable);
1856 MonoDomain *domain = mono_vtable_domain_internal (vtable);
1857 uint32_t domain_id = domain ? mono_domain_get_id (domain) : 0;
1859 ENTER_LOG (&vtable_loads_ctr, logbuffer,
1860 EVENT_SIZE /* event */ +
1861 BYTE_SIZE /* type */ +
1862 LEB128_SIZE /* vtable */ +
1863 LEB128_SIZE /* domain id */ +
1864 LEB128_SIZE /* klass */
1867 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1868 emit_byte (logbuffer, TYPE_VTABLE);
1869 emit_ptr (logbuffer, vtable);
1870 emit_ptr (logbuffer, (void *)(uintptr_t) domain_id);
1871 emit_ptr (logbuffer, klass);
1873 EXIT_LOG;
1876 static void
1877 method_enter (MonoProfiler *prof, MonoMethod *method, MonoProfilerCallContext *ctx)
1879 if (get_thread ()->call_depth++ <= log_config.max_call_depth) {
1880 ENTER_LOG (&method_entries_ctr, logbuffer,
1881 EVENT_SIZE /* event */ +
1882 LEB128_SIZE /* method */
1885 emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
1886 emit_method (logbuffer, method);
1888 EXIT_LOG;
1892 static void
1893 method_leave (MonoProfiler *prof, MonoMethod *method, MonoProfilerCallContext *ctx)
1895 if (--get_thread ()->call_depth <= log_config.max_call_depth) {
1896 ENTER_LOG (&method_exits_ctr, logbuffer,
1897 EVENT_SIZE /* event */ +
1898 LEB128_SIZE /* method */
1901 emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
1902 emit_method (logbuffer, method);
1904 EXIT_LOG;
1908 static void
1909 tailcall (MonoProfiler *prof, MonoMethod *method, MonoMethod *target)
1911 method_leave (prof, method, NULL);
1914 static void
1915 method_exc_leave (MonoProfiler *prof, MonoMethod *method, MonoObject *exc)
1917 if (--get_thread ()->call_depth <= log_config.max_call_depth) {
1918 ENTER_LOG (&method_exception_exits_ctr, logbuffer,
1919 EVENT_SIZE /* event */ +
1920 LEB128_SIZE /* method */
1923 emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
1924 emit_method (logbuffer, method);
1926 EXIT_LOG;
1930 static MonoProfilerCallInstrumentationFlags
1931 method_filter (MonoProfiler *prof, MonoMethod *method)
1933 if (log_config.callspec.len > 0 &&
1934 !mono_callspec_eval (method, &log_config.callspec))
1935 return MONO_PROFILER_CALL_INSTRUMENTATION_NONE;
1937 return MONO_PROFILER_CALL_INSTRUMENTATION_ENTER |
1938 MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE |
1939 MONO_PROFILER_CALL_INSTRUMENTATION_TAIL_CALL |
1940 MONO_PROFILER_CALL_INSTRUMENTATION_EXCEPTION_LEAVE;
1943 static void
1944 method_jitted (MonoProfiler *prof, MonoMethod *method, MonoJitInfo *ji)
1946 buffer_lock ();
1948 register_method_local (method, ji);
1950 buffer_unlock ();
1953 static void
1954 code_buffer_new (MonoProfiler *prof, const mono_byte *buffer, uint64_t size, MonoProfilerCodeBufferType type, const void *data)
1956 const char *name;
1957 int nlen;
1959 if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
1960 name = (const char *) data;
1961 nlen = strlen (name) + 1;
1962 } else {
1963 name = NULL;
1964 nlen = 0;
1967 ENTER_LOG (&code_buffers_ctr, logbuffer,
1968 EVENT_SIZE /* event */ +
1969 BYTE_SIZE /* type */ +
1970 LEB128_SIZE /* buffer */ +
1971 LEB128_SIZE /* size */ +
1972 (name ? (
1973 nlen /* name */
1974 ) : 0)
1977 emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
1978 emit_byte (logbuffer, type);
1979 emit_ptr (logbuffer, buffer);
1980 emit_value (logbuffer, size);
1982 if (name) {
1983 memcpy (logbuffer->cursor, name, nlen);
1984 logbuffer->cursor += nlen;
1987 EXIT_LOG;
1990 static void
1991 throw_exc (MonoProfiler *prof, MonoObject *object)
1993 int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_THROW_BT : 0;
1994 FrameData data;
1996 if (do_bt)
1997 collect_bt (&data);
1999 ENTER_LOG (&exception_throws_ctr, logbuffer,
2000 EVENT_SIZE /* event */ +
2001 LEB128_SIZE /* object */ +
2002 (do_bt ? (
2003 LEB128_SIZE /* count */ +
2004 data.count * (
2005 LEB128_SIZE /* method */
2007 ) : 0)
2010 emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
2011 emit_obj (logbuffer, object);
2013 if (do_bt)
2014 emit_bt (logbuffer, &data);
2016 EXIT_LOG;
2019 static void
2020 clause_exc (MonoProfiler *prof, MonoMethod *method, uint32_t clause_num, MonoExceptionEnum clause_type, MonoObject *exc)
2022 ENTER_LOG (&exception_clauses_ctr, logbuffer,
2023 EVENT_SIZE /* event */ +
2024 BYTE_SIZE /* clause type */ +
2025 LEB128_SIZE /* clause num */ +
2026 LEB128_SIZE /* method */ +
2027 LEB128_SIZE /* exc */
2030 emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
2031 emit_byte (logbuffer, clause_type);
2032 emit_value (logbuffer, clause_num);
2033 emit_method (logbuffer, method);
2034 emit_obj (logbuffer, exc);
2036 EXIT_LOG;
2039 static void
2040 monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
2042 int do_bt = (!log_config.enter_leave && mono_atomic_load_i32 (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_MONITOR_BT : 0;
2043 FrameData data;
2045 if (do_bt)
2046 collect_bt (&data);
2048 ENTER_LOG (&monitor_events_ctr, logbuffer,
2049 EVENT_SIZE /* event */ +
2050 BYTE_SIZE /* ev */ +
2051 LEB128_SIZE /* object */ +
2052 (do_bt ? (
2053 LEB128_SIZE /* count */ +
2054 data.count * (
2055 LEB128_SIZE /* method */
2057 ) : 0)
2060 emit_event (logbuffer, do_bt | TYPE_MONITOR);
2061 emit_byte (logbuffer, ev);
2062 emit_obj (logbuffer, object);
2064 if (do_bt)
2065 emit_bt (logbuffer, &data);
2067 EXIT_LOG;
2070 static void
2071 monitor_contention (MonoProfiler *prof, MonoObject *object)
2073 monitor_event (prof, object, MONO_PROFILER_MONITOR_CONTENTION);
2076 static void
2077 monitor_acquired (MonoProfiler *prof, MonoObject *object)
2079 monitor_event (prof, object, MONO_PROFILER_MONITOR_DONE);
2082 static void
2083 monitor_failed (MonoProfiler *prof, MonoObject *object)
2085 monitor_event (prof, object, MONO_PROFILER_MONITOR_FAIL);
2088 static void
2089 thread_start (MonoProfiler *prof, uintptr_t tid)
2091 ENTER_LOG (&thread_starts_ctr, logbuffer,
2092 EVENT_SIZE /* event */ +
2093 BYTE_SIZE /* type */ +
2094 LEB128_SIZE /* tid */
2097 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2098 emit_byte (logbuffer, TYPE_THREAD);
2099 emit_ptr (logbuffer, (void*) tid);
2101 EXIT_LOG;
2104 static void
2105 thread_end (MonoProfiler *prof, uintptr_t tid)
2107 ENTER_LOG (&thread_ends_ctr, logbuffer,
2108 EVENT_SIZE /* event */ +
2109 BYTE_SIZE /* type */ +
2110 LEB128_SIZE /* tid */
2113 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2114 emit_byte (logbuffer, TYPE_THREAD);
2115 emit_ptr (logbuffer, (void*) tid);
2117 EXIT_LOG_EXPLICIT (NO_SEND);
2119 MonoProfilerThread *thread = get_thread ();
2121 // Internal profiler threads will clean up manually.
2122 if (thread->attached) {
2123 thread->ended = TRUE;
2124 remove_thread (thread);
2126 PROF_TLS_SET (MONO_PROFILER_THREAD_DEAD);
2130 static void
2131 thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
2133 int len = strlen (name) + 1;
2135 ENTER_LOG (&thread_names_ctr, logbuffer,
2136 EVENT_SIZE /* event */ +
2137 BYTE_SIZE /* type */ +
2138 LEB128_SIZE /* tid */ +
2139 len /* name */
2142 emit_event (logbuffer, TYPE_METADATA);
2143 emit_byte (logbuffer, TYPE_THREAD);
2144 emit_ptr (logbuffer, (void*)tid);
2145 memcpy (logbuffer->cursor, name, len);
2146 logbuffer->cursor += len;
2148 EXIT_LOG;
2151 static void
2152 domain_loaded (MonoProfiler *prof, MonoDomain *domain)
2154 ENTER_LOG (&domain_loads_ctr, logbuffer,
2155 EVENT_SIZE /* event */ +
2156 BYTE_SIZE /* type */ +
2157 LEB128_SIZE /* domain id */
2160 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2161 emit_byte (logbuffer, TYPE_DOMAIN);
2162 emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2164 EXIT_LOG;
2167 static void
2168 domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
2170 ENTER_LOG (&domain_unloads_ctr, logbuffer,
2171 EVENT_SIZE /* event */ +
2172 BYTE_SIZE /* type */ +
2173 LEB128_SIZE /* domain id */
2176 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2177 emit_byte (logbuffer, TYPE_DOMAIN);
2178 emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2180 EXIT_LOG;
2183 static void
2184 domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
2186 int nlen = strlen (name) + 1;
2188 ENTER_LOG (&domain_names_ctr, logbuffer,
2189 EVENT_SIZE /* event */ +
2190 BYTE_SIZE /* type */ +
2191 LEB128_SIZE /* domain id */ +
2192 nlen /* name */
2195 emit_event (logbuffer, TYPE_METADATA);
2196 emit_byte (logbuffer, TYPE_DOMAIN);
2197 emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2198 memcpy (logbuffer->cursor, name, nlen);
2199 logbuffer->cursor += nlen;
2201 EXIT_LOG;
2204 static void
2205 context_loaded (MonoProfiler *prof, MonoAppContext *context)
2207 ENTER_LOG (&context_loads_ctr, logbuffer,
2208 EVENT_SIZE /* event */ +
2209 BYTE_SIZE /* type */ +
2210 LEB128_SIZE /* context id */ +
2211 LEB128_SIZE /* domain id */
2214 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2215 emit_byte (logbuffer, TYPE_CONTEXT);
2216 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2217 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2219 EXIT_LOG;
2222 static void
2223 context_unloaded (MonoProfiler *prof, MonoAppContext *context)
2225 ENTER_LOG (&context_unloads_ctr, logbuffer,
2226 EVENT_SIZE /* event */ +
2227 BYTE_SIZE /* type */ +
2228 LEB128_SIZE /* context id */ +
2229 LEB128_SIZE /* domain id */
2232 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2233 emit_byte (logbuffer, TYPE_CONTEXT);
2234 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2235 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2237 EXIT_LOG;
2240 typedef struct {
2241 MonoMethod *method;
2242 MonoDomain *domain;
2243 void *base_address;
2244 int offset;
2245 } AsyncFrameInfo;
2247 typedef struct {
2248 MonoLockFreeQueueNode node;
2249 uint64_t time;
2250 uintptr_t tid;
2251 const void *ip;
2252 int count;
2253 AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
2254 } SampleHit;
2256 static mono_bool
2257 async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
2259 SampleHit *sample = (SampleHit *) data;
2261 if (sample->count < log_config.num_frames) {
2262 int i = sample->count;
2264 sample->frames [i].method = method;
2265 sample->frames [i].domain = domain;
2266 sample->frames [i].base_address = base_address;
2267 sample->frames [i].offset = offset;
2269 if (method)
2270 inc_method_ref_count (method);
2272 sample->count++;
2275 return sample->count == log_config.num_frames;
2278 #define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
2279 #define SAMPLE_BLOCK_SIZE (mono_pagesize ())
2281 static void
2282 enqueue_sample_hit (gpointer p)
2284 SampleHit *sample = p;
2286 mono_lock_free_queue_node_unpoison (&sample->node);
2287 mono_lock_free_queue_enqueue (&log_profiler.dumper_queue, &sample->node);
2288 mono_os_sem_post (&log_profiler.dumper_queue_sem);
2291 static void
2292 mono_sample_hit (MonoProfiler *profiler, const mono_byte *ip, const void *context)
2295 * Please note: We rely on the runtime loading the profiler with
2296 * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
2297 * this function (and its siblings) are resolved when the profiler is
2298 * loaded. Otherwise, we would potentially invoke the dynamic linker when
2299 * invoking runtime functions, which is not async-signal-safe.
2302 if (mono_atomic_load_i32 (&log_profiler.in_shutdown))
2303 return;
2305 SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);
2307 if (!sample) {
2309 * If we're out of reusable sample events and we're not allowed to
2310 * allocate more, we have no choice but to drop the event.
2312 if (mono_atomic_load_i32 (&sample_allocations_ctr) >= log_config.max_allocated_sample_hits)
2313 return;
2315 sample = mono_lock_free_alloc (&profiler->sample_allocator);
2316 mono_lock_free_queue_node_init (&sample->node, TRUE);
2318 mono_atomic_inc_i32 (&sample_allocations_ctr);
2321 sample->count = 0;
2322 mono_stack_walk_async_safe (&async_walk_stack, (void *) context, sample);
2324 sample->time = current_time ();
2325 sample->tid = thread_id ();
2326 sample->ip = ip;
2328 mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
2331 static uintptr_t *code_pages = 0;
2332 static int num_code_pages = 0;
2333 static int size_code_pages = 0;
2334 #define CPAGE_SHIFT (9)
2335 #define CPAGE_SIZE (1 << CPAGE_SHIFT)
2336 #define CPAGE_MASK (~(CPAGE_SIZE - 1))
2337 #define CPAGE_ADDR(p) ((p) & CPAGE_MASK)
2339 static uintptr_t
2340 add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
2342 uintptr_t i;
2343 uintptr_t start_pos;
2344 start_pos = (page >> CPAGE_SHIFT) % hsize;
2345 i = start_pos;
2346 do {
2347 if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
2348 return 0;
2349 } else if (!hash [i]) {
2350 hash [i] = page;
2351 return 1;
2353 /* wrap around */
2354 if (++i == hsize)
2355 i = 0;
2356 } while (i != start_pos);
2357 g_assert_not_reached ();
2358 return 0;
2361 static void
2362 add_code_pointer (uintptr_t ip)
2364 uintptr_t i;
2365 if (num_code_pages * 2 >= size_code_pages) {
2366 uintptr_t *n;
2367 uintptr_t old_size = size_code_pages;
2368 size_code_pages *= 2;
2369 if (size_code_pages == 0)
2370 size_code_pages = 16;
2371 n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
2372 for (i = 0; i < old_size; ++i) {
2373 if (code_pages [i])
2374 add_code_page (n, size_code_pages, code_pages [i]);
2376 if (code_pages)
2377 g_free (code_pages);
2378 code_pages = n;
2380 num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
/*
 * Emit a TYPE_SAMPLE_USYM event describing an unmanaged symbol: its
 * address, size (0 when unknown) and NUL-terminated name, copied raw
 * into the log buffer.
 */
2383 static void
2384 dump_usym (const char *name, uintptr_t value, uintptr_t size)
2386 int len = strlen (name) + 1;
2388 ENTER_LOG (&sample_usyms_ctr, logbuffer,
2389 EVENT_SIZE /* event */ +
2390 LEB128_SIZE /* value */ +
2391 LEB128_SIZE /* size */ +
2392 len /* name */
2395 emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
2396 emit_ptr (logbuffer, (void *) value);
2397 emit_value (logbuffer, size);
2398 memcpy (logbuffer->cursor, name, len);
2399 logbuffer->cursor += len;
2401 EXIT_LOG;
2404 static const char*
2405 symbol_for (uintptr_t code)
2407 #ifdef HAVE_DLADDR
2408 Dl_info di;
2410 if (dladdr ((void *) code, &di))
2411 if (di.dli_sname)
2412 return di.dli_sname;
2413 #endif
2415 return NULL;
/*
 * Walk the code_pages hash set and emit usym events for every page not
 * yet dumped.  The low bit of an entry is used as the "already dumped"
 * flag; symbols are probed every 16 bytes across the whole page.
 */
2418 static void
2419 dump_unmanaged_coderefs (void)
2421 int i;
2422 const char* last_symbol;
2423 uintptr_t addr, page_end;
2425 for (i = 0; i < size_code_pages; ++i) {
2426 const char* sym;
/* skip empty slots and pages already marked as dumped (low bit set) */
2427 if (!code_pages [i] || code_pages [i] & 1)
2428 continue;
2429 last_symbol = NULL;
2430 addr = CPAGE_ADDR (code_pages [i]);
2431 page_end = addr + CPAGE_SIZE;
/* mark the page dumped before scanning it */
2432 code_pages [i] |= 1;
2433 /* we dump the symbols for the whole page */
2434 for (; addr < page_end; addr += 16) {
2435 sym = symbol_for (addr);
/* suppress consecutive duplicates of the same symbol */
2436 if (sym && sym == last_symbol)
2437 continue;
2438 last_symbol = sym;
2439 if (!sym)
2440 continue;
2441 dump_usym (sym, addr, 0); /* let's not guess the size */
/*
 * Register a MonoCounter with the profiler's counter agent list (under
 * counters_mutex).  If the counter is already tracked, its cached value
 * is reset so the next sample re-emits it.  No-op during shutdown.
 */
2446 static void
2447 counters_add_agent (MonoCounter *counter)
2449 if (mono_atomic_load_i32 (&log_profiler.in_shutdown))
2450 return;
2452 MonoCounterAgent *agent, *item;
2454 mono_os_mutex_lock (&log_profiler.counters_mutex);
/* already tracked? just drop the cached value */
2456 for (agent = log_profiler.counters; agent; agent = agent->next) {
2457 if (agent->counter == counter) {
2458 agent->value_size = 0;
2459 if (agent->value) {
2460 g_free (agent->value);
2461 agent->value = NULL;
2463 goto done;
/* new counter: append a fresh agent at the tail of the list */
2467 agent = (MonoCounterAgent *) g_malloc (sizeof (MonoCounterAgent));
2468 agent->counter = counter;
2469 agent->value = NULL;
2470 agent->value_size = 0;
2471 agent->index = log_profiler.counters_index++;
2472 agent->emitted = FALSE;
2473 agent->next = NULL;
2475 if (!log_profiler.counters) {
2476 log_profiler.counters = agent;
2477 } else {
2478 item = log_profiler.counters;
2479 while (item->next)
2480 item = item->next;
2481 item->next = agent;
2484 done:
2485 mono_os_mutex_unlock (&log_profiler.counters_mutex);
/* mono_counters_foreach () callback: track every existing counter. */
2488 static mono_bool
2489 counters_init_foreach_callback (MonoCounter *counter, gpointer data)
2491 counters_add_agent (counter);
/* TRUE = keep iterating */
2492 return TRUE;
/*
 * Set up counter tracking: register for future counter registrations
 * and pick up all counters that already exist.
 */
2495 static void
2496 counters_init (void)
2498 mono_os_mutex_init (&log_profiler.counters_mutex);
/* index 0 is never used; agents start at 1 */
2500 log_profiler.counters_index = 1;
2502 mono_counters_on_register (&counters_add_agent);
2503 mono_counters_foreach (counters_init_foreach_callback, NULL);
/*
 * Emit a TYPE_SAMPLE_COUNTERS_DESC event describing every counter agent
 * that has not yet had its descriptor written.  Caller does not hold
 * counters_mutex; it is taken here.
 */
2506 static void
2507 counters_emit (void)
2509 MonoCounterAgent *agent;
2510 int len = 0;
2511 int size =
2512 EVENT_SIZE /* event */ +
2513 LEB128_SIZE /* len */
2516 mono_os_mutex_lock (&log_profiler.counters_mutex);
/* first pass: size the event and count pending descriptors */
2518 for (agent = log_profiler.counters; agent; agent = agent->next) {
2519 if (agent->emitted)
2520 continue;
2522 size +=
2523 LEB128_SIZE /* section */ +
2524 strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
2525 BYTE_SIZE /* type */ +
2526 BYTE_SIZE /* unit */ +
2527 BYTE_SIZE /* variance */ +
2528 LEB128_SIZE /* index */
2531 len++;
/* nothing new to describe */
2534 if (!len)
2535 goto done;
2537 ENTER_LOG (&counter_descriptors_ctr, logbuffer, size);
2539 emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
2540 emit_value (logbuffer, len);
/* second pass: write each pending descriptor and mark it emitted */
2542 for (agent = log_profiler.counters; agent; agent = agent->next) {
2543 const char *name;
2545 if (agent->emitted)
2546 continue;
2548 name = mono_counter_get_name (agent->counter);
2549 emit_value (logbuffer, mono_counter_get_section (agent->counter));
2550 emit_string (logbuffer, name, strlen (name) + 1);
2551 emit_value (logbuffer, mono_counter_get_type (agent->counter));
2552 emit_value (logbuffer, mono_counter_get_unit (agent->counter));
2553 emit_value (logbuffer, mono_counter_get_variance (agent->counter));
2554 emit_value (logbuffer, agent->index);
2556 agent->emitted = TRUE;
2559 EXIT_LOG;
2561 done:
2562 mono_os_mutex_unlock (&log_profiler.counters_mutex);
/*
 * Sample every tracked counter and emit a TYPE_SAMPLE_COUNTERS event
 * containing only the counters whose value changed since the previous
 * sample.  Integer counters are emitted as deltas against the cached
 * previous value; doubles and strings are emitted as absolute values.
 */
2565 static void
2566 counters_sample (uint64_t timestamp)
2568 MonoCounterAgent *agent;
2569 MonoCounter *counter;
2570 int type;
2571 int buffer_size;
2572 void *buffer;
2573 int size;
/* make sure descriptors exist before any sample referencing them */
2575 counters_emit ();
2577 buffer_size = 8;
2578 buffer = g_calloc (1, buffer_size);
2580 mono_os_mutex_lock (&log_profiler.counters_mutex);
2582 size =
2583 EVENT_SIZE /* event */
2586 for (agent = log_profiler.counters; agent; agent = agent->next) {
2588 * FIXME: This calculation is incorrect for string counters since
2589 * mono_counter_get_size () just returns 0 in that case. We should
2590 * address this if we ever actually add any string counters to Mono.
2593 size +=
2594 LEB128_SIZE /* index */ +
2595 BYTE_SIZE /* type */ +
2596 mono_counter_get_size (agent->counter) /* value */
2600 size +=
2601 LEB128_SIZE /* stop marker */
2604 ENTER_LOG (&counter_samples_ctr, logbuffer, size);
2606 emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
2608 for (agent = log_profiler.counters; agent; agent = agent->next) {
2609 size_t size;
2611 counter = agent->counter;
2613 size = mono_counter_get_size (counter);
/* grow the scratch buffer to fit the largest counter value */
2615 if (size > buffer_size) {
2616 buffer_size = size;
2617 buffer = g_realloc (buffer, buffer_size);
2620 memset (buffer, 0, buffer_size);
/* NOTE(review): the sampling call lives inside g_assert (); if glib
 * assertions were ever compiled out (G_DISABLE_ASSERT) the sample
 * would not be taken at all — verify build flags guarantee this. */
2622 g_assert (mono_counters_sample (counter, buffer, size));
2624 type = mono_counter_get_type (counter);
/* first sample of this counter: cache storage for the previous value */
2626 if (!agent->value) {
2627 agent->value = g_calloc (1, size);
2628 agent->value_size = size;
2629 } else {
/* unchanged since last sample? skip emitting it */
2630 if (type == MONO_COUNTER_STRING) {
2631 if (strcmp (agent->value, buffer) == 0)
2632 continue;
2633 } else {
2634 if (agent->value_size == size && memcmp (agent->value, buffer, size) == 0)
2635 continue;
2639 emit_uvalue (logbuffer, agent->index);
2640 emit_value (logbuffer, type);
2641 switch (type) {
2642 case MONO_COUNTER_INT:
2643 #if SIZEOF_VOID_P == 4
2644 case MONO_COUNTER_WORD:
2645 #endif
/* integer counters are emitted as deltas against the cached value */
2646 emit_svalue (logbuffer, *(int*)buffer - *(int*)agent->value);
2647 break;
2648 case MONO_COUNTER_UINT:
2649 emit_uvalue (logbuffer, *(guint*)buffer - *(guint*)agent->value);
2650 break;
2651 case MONO_COUNTER_TIME_INTERVAL:
2652 case MONO_COUNTER_LONG:
2653 #if SIZEOF_VOID_P == 8
2654 case MONO_COUNTER_WORD:
2655 #endif
2656 emit_svalue (logbuffer, *(gint64*)buffer - *(gint64*)agent->value);
2657 break;
2658 case MONO_COUNTER_ULONG:
2659 emit_uvalue (logbuffer, *(guint64*)buffer - *(guint64*)agent->value);
2660 break;
2661 case MONO_COUNTER_DOUBLE:
/* doubles are absolute, not deltas */
2662 emit_double (logbuffer, *(double*)buffer);
2663 break;
2664 case MONO_COUNTER_STRING:
2665 if (size == 0) {
2666 emit_byte (logbuffer, 0);
2667 } else {
2668 emit_byte (logbuffer, 1);
2669 emit_string (logbuffer, (char*)buffer, size);
2671 break;
2672 default:
2673 g_assert_not_reached ();
/* string values can grow; widen the cached copy if needed */
2676 if (type == MONO_COUNTER_STRING && size > agent->value_size) {
2677 agent->value = g_realloc (agent->value, size);
2678 agent->value_size = size;
2681 if (size > 0)
2682 memcpy (agent->value, buffer, size);
2684 g_free (buffer);
/* stop marker terminating the counter list */
2686 emit_value (logbuffer, 0);
2688 EXIT_LOG;
2690 mono_os_mutex_unlock (&log_profiler.counters_mutex);
/*
 * Emit a TYPE_SAMPLE_COUNTERS_DESC event for every perf counter agent
 * whose descriptor has not yet been written.  Caller (perfcounters_sample)
 * already holds counters_mutex.
 */
2693 static void
2694 perfcounters_emit (void)
2696 PerfCounterAgent *pcagent;
2697 int len = 0;
2698 int size =
2699 EVENT_SIZE /* event */ +
2700 LEB128_SIZE /* len */
/* first pass: size the event and count pending descriptors */
2703 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2704 if (pcagent->emitted)
2705 continue;
2707 size +=
2708 LEB128_SIZE /* section */ +
2709 strlen (pcagent->category_name) + 1 /* category name */ +
2710 strlen (pcagent->name) + 1 /* name */ +
2711 BYTE_SIZE /* type */ +
2712 BYTE_SIZE /* unit */ +
2713 BYTE_SIZE /* variance */ +
2714 LEB128_SIZE /* index */
2717 len++;
2720 if (!len)
2721 return;
2723 ENTER_LOG (&perfcounter_descriptors_ctr, logbuffer, size);
2725 emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
2726 emit_value (logbuffer, len);
/* second pass: write the descriptors; perf counters are always
 * reported as raw, variable 64-bit values */
2728 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2729 if (pcagent->emitted)
2730 continue;
2732 emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
2733 emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
2734 emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
2735 emit_byte (logbuffer, MONO_COUNTER_LONG);
2736 emit_byte (logbuffer, MONO_COUNTER_RAW);
2737 emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
2738 emit_value (logbuffer, pcagent->index);
2740 pcagent->emitted = TRUE;
2743 EXIT_LOG;
/*
 * mono_perfcounter_foreach () callback: update the agent for a perf
 * counter, or create one the first time it is seen.  Marks the agent
 * alive (deleted = FALSE) and dirty (updated = TRUE) when its value
 * changed.  Always returns TRUE to keep iterating.
 */
2746 static gboolean
2747 perfcounters_foreach (char *category_name, char *name, unsigned char type, gint64 value, gpointer user_data)
2749 PerfCounterAgent *pcagent;
2751 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2752 if (strcmp (pcagent->category_name, category_name) != 0 || strcmp (pcagent->name, name) != 0)
2753 continue;
/* value unchanged: nothing to re-emit */
2754 if (pcagent->value == value)
2755 return TRUE;
2757 pcagent->value = value;
2758 pcagent->updated = TRUE;
2759 pcagent->deleted = FALSE;
2760 return TRUE;
/* first sighting: prepend a new agent to the list */
2763 pcagent = g_new0 (PerfCounterAgent, 1);
2764 pcagent->next = log_profiler.perfcounters;
2765 pcagent->index = log_profiler.counters_index++;
2766 pcagent->category_name = g_strdup (category_name);
2767 pcagent->name = g_strdup (name);
2768 pcagent->value = value;
2769 pcagent->emitted = FALSE;
2770 pcagent->updated = TRUE;
2771 pcagent->deleted = FALSE;
2773 log_profiler.perfcounters = pcagent;
2775 return TRUE;
/*
 * Sample all system perf counters and emit a TYPE_SAMPLE_COUNTERS event
 * for those that changed.  Agents not refreshed by the foreach pass are
 * considered deleted and skipped.
 */
2778 static void
2779 perfcounters_sample (uint64_t timestamp)
2781 PerfCounterAgent *pcagent;
2782 int len = 0;
2783 int size;
2785 mono_os_mutex_lock (&log_profiler.counters_mutex);
2787 /* mark all perfcounters as deleted, foreach will unmark them as necessary */
2788 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next)
2789 pcagent->deleted = TRUE;
2791 mono_perfcounter_foreach (perfcounters_foreach, NULL);
/* descriptors must precede the samples that reference them */
2793 perfcounters_emit ();
2795 size =
2796 EVENT_SIZE /* event */
/* first pass: size the event and count the dirty counters */
2799 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2800 if (pcagent->deleted || !pcagent->updated)
2801 continue;
2803 size +=
2804 LEB128_SIZE /* index */ +
2805 BYTE_SIZE /* type */ +
2806 LEB128_SIZE /* value */
2809 len++;
2812 if (!len)
2813 goto done;
2815 size +=
2816 LEB128_SIZE /* stop marker */
2819 ENTER_LOG (&perfcounter_samples_ctr, logbuffer, size);
2821 emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
/* second pass: emit each dirty counter as an absolute 64-bit value */
2823 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2824 if (pcagent->deleted || !pcagent->updated)
2825 continue;
2826 emit_uvalue (logbuffer, pcagent->index);
2827 emit_byte (logbuffer, MONO_COUNTER_LONG);
2828 emit_svalue (logbuffer, pcagent->value);
2830 pcagent->updated = FALSE;
/* stop marker terminating the counter list */
2833 emit_value (logbuffer, 0);
2835 EXIT_LOG;
2837 done:
2838 mono_os_mutex_unlock (&log_profiler.counters_mutex);
/* Take one timestamped sample of both runtime and system perf counters. */
2841 static void
2842 counters_and_perfcounters_sample (void)
2844 uint64_t now = current_time ();
2846 counters_sample (now);
2847 perfcounters_sample (now);
/* Hazard-pointer free function: return a SampleHit to its lock-free allocator. */
2850 static void
2851 free_sample_hit (gpointer p)
2853 mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
/* Drain the sample reuse queue at shutdown, freeing every SampleHit
 * through the hazard pointer machinery. */
2856 static void
2857 cleanup_reusable_samples (void)
2859 SampleHit *sample;
2861 while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.sample_reuse_queue)))
2862 mono_thread_hazardous_try_free (sample, free_sample_hit);
/* Wake the helper thread by writing a one-byte command to its pipe
 * (1 = shut down, 2 = detach; see helper_thread ()).  Exits the process
 * on write failure since the profiler cannot recover. */
2865 static void
2866 signal_helper_thread (char c)
2868 if (write (log_profiler.pipes [1], &c, 1) != 1) {
2869 mono_profiler_printf_err ("Could not write to log profiler pipe: %s", g_strerror (errno));
2870 exit (1);
/*
 * Early-shutdown hook: optionally take a final heapshot, then ask the
 * three profiler threads (helper, dumper, writer) to detach from the
 * runtime and wait until all three have done so.
 */
2874 static void
2875 log_early_shutdown (MonoProfiler *prof)
2877 dump_aot_id ();
2879 if (log_config.hs_on_shutdown) {
2880 mono_atomic_store_i32 (&log_profiler.heapshot_requested, 1);
2881 mono_gc_collect (mono_gc_max_generation ());
2885 * We need to detach the internal threads early on. log_shutdown () is
2886 * called after the threading subsystem has been cleaned up, so detaching
2887 * there would crash.
2889 mono_os_sem_init (&log_profiler.detach_threads_sem, 0);
2890 mono_atomic_store_i32 (&log_profiler.detach_threads, 1);
/* 2 = detach command for the helper thread; the semaphores wake the
 * dumper and writer so they notice the detach flag */
2892 signal_helper_thread (2);
2893 mono_os_sem_post (&prof->dumper_queue_sem);
2894 mono_os_sem_post (&prof->writer_queue_sem);
/* one post per profiler thread: helper, dumper, writer */
2896 for (int i = 0; i < 3; i++)
2897 mono_os_sem_wait (&log_profiler.detach_threads_sem, MONO_SEM_FLAGS_NONE);
2899 mono_os_sem_destroy (&log_profiler.detach_threads_sem);
/*
 * Final shutdown: take a last counter sample, stop the helper, dumper
 * and writer threads in order, free all agents and per-thread buffers,
 * flush remaining hazardous frees, and close the output file.
 */
2902 static void
2903 log_shutdown (MonoProfiler *prof)
2905 mono_atomic_store_i32 (&log_profiler.in_shutdown, 1);
2907 if (ENABLED (PROFLOG_COUNTER_EVENTS))
2908 counters_and_perfcounters_sample ();
/* 1 = shutdown command; wait for the helper thread to exit */
2910 signal_helper_thread (1);
2911 mono_os_event_wait_one (&prof->helper_thread_exited, MONO_INFINITE_WAIT, FALSE);
2912 mono_os_event_destroy (&prof->helper_thread_exited);
2914 mono_os_mutex_destroy (&log_profiler.counters_mutex);
2916 MonoCounterAgent *mc_next;
2918 for (MonoCounterAgent *cur = log_profiler.counters; cur; cur = mc_next) {
2919 mc_next = cur->next;
2920 g_free (cur);
2923 PerfCounterAgent *pc_next;
2925 for (PerfCounterAgent *cur = log_profiler.perfcounters; cur; cur = pc_next) {
2926 pc_next = cur->next;
2927 g_free (cur);
2931 * Ensure that we empty the LLS completely, even if some nodes are
2932 * not immediately removed upon calling mono_lls_remove (), by
2933 * iterating until the head is NULL.
2935 while (log_profiler.profiler_thread_list.head) {
2936 MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
2937 g_assert (thread->attached && "Why is a thread in the LLS not attached?");
2939 remove_thread (thread);
2940 } MONO_LLS_FOREACH_SAFE_END
2944 * Ensure that all threads have been freed, so that we don't miss any
2945 * buffers when we shut down the writer thread below.
2947 mono_thread_hazardous_try_free_all ();
/* stop the dumper, then the writer, so the writer drains everything */
2949 mono_atomic_store_i32 (&prof->run_dumper_thread, 0);
2950 mono_os_sem_post (&prof->dumper_queue_sem);
2951 mono_os_event_wait_one (&prof->dumper_thread_exited, MONO_INFINITE_WAIT, FALSE);
2952 mono_os_event_destroy (&prof->dumper_thread_exited);
2953 mono_os_sem_destroy (&prof->dumper_queue_sem);
2955 mono_atomic_store_i32 (&prof->run_writer_thread, 0);
2956 mono_os_sem_post (&prof->writer_queue_sem);
2957 mono_os_event_wait_one (&prof->writer_thread_exited, MONO_INFINITE_WAIT, FALSE);
2958 mono_os_event_destroy (&prof->writer_thread_exited);
2959 mono_os_sem_destroy (&prof->writer_queue_sem);
2962 * Free all writer queue entries, and ensure that all sample hits will be
2963 * added to the sample reuse queue.
2965 mono_thread_hazardous_try_free_all ();
2967 cleanup_reusable_samples ();
2970 * Finally, make sure that all sample hits are freed. This should cover all
2971 * hazardous data from the profiler. We can now be sure that the runtime
2972 * won't later invoke free functions in the profiler library after it has
2973 * been unloaded.
2975 mono_thread_hazardous_try_free_all ();
2977 gint32 state = mono_atomic_load_i32 (&log_profiler.buffer_lock_state);
/* low 16 bits = reader count, high 16 bits = exclusive owner */
2979 g_assert (!(state & 0xFFFF) && "Why is the reader count still non-zero?");
2980 g_assert (!(state >> 16) && "Why is the exclusive lock still held?");
2982 #if defined (HAVE_SYS_ZLIB)
2983 if (prof->gzfile)
2984 gzclose (prof->gzfile);
2985 #endif
2986 if (prof->pipe_output)
2987 pclose (prof->file);
2988 else
2989 fclose (prof->file);
2991 mono_conc_hashtable_destroy (prof->method_table);
2992 mono_os_mutex_destroy (&prof->method_table_mutex);
2994 mono_coop_mutex_destroy (&log_profiler.api_mutex);
2996 g_free (prof->args);
/*
 * Expand the output filename template: every "%t" becomes the current
 * UTC timestamp (YYYYMMDDHHMMSS), every "%p" the current process id,
 * and "%%" a literal '%'.  Returns a newly allocated string owned by
 * the caller; if the template contains no tokens, a plain copy.
 */
static char*
new_filename (const char* filename)
{
	time_t now = time (NULL);
	int pid = process_id ();
	char pid_buf [16];
	char time_buf [16];
	int date_tokens = 0;
	int pid_tokens = 0;
	const char *src;

	/* First pass: count template tokens so we can size the result. */
	for (src = filename; *src; src++) {
		if (*src != '%')
			continue;
		src++;
		if (*src == 't')
			date_tokens++;
		else if (*src == 'p')
			pid_tokens++;
		else if (*src == 0)
			break;
	}

	if (!date_tokens && !pid_tokens)
		return pstrdup (filename);

	snprintf (pid_buf, sizeof (pid_buf), "%d", pid);
	/* NOTE(review): gmtime () returns static storage; assumed to be
	 * called from a single thread here — confirm callers. */
	struct tm *ts = gmtime (&now);
	snprintf (time_buf, sizeof (time_buf), "%d%02d%02d%02d%02d%02d",
		1900 + ts->tm_year, 1 + ts->tm_mon, ts->tm_mday, ts->tm_hour, ts->tm_min, ts->tm_sec);

	int date_len = strlen (time_buf);
	int pid_len = strlen (pid_buf);
	/* Each "%t"/"%p" occupies two template characters, so this slightly
	 * over-allocates and always leaves room for the terminating NUL. */
	char *res = (char *) g_malloc (strlen (filename) + date_len * date_tokens + pid_len * pid_tokens);
	char *dst = res;

	/* Second pass: copy the template, substituting tokens. */
	for (src = filename; *src; src++) {
		if (*src != '%') {
			*dst++ = *src;
			continue;
		}
		src++;
		if (*src == 't') {
			strcpy (dst, time_buf);
			dst += date_len;
		} else if (*src == 'p') {
			strcpy (dst, pid_buf);
			dst += pid_len;
		} else if (*src == '%') {
			*dst++ = '%';
		} else if (*src == 0) {
			break;
		} else {
			/* unknown specifier: keep it verbatim */
			*dst++ = '%';
			*dst++ = *src;
		}
	}

	*dst = 0;
	return res;
}
/*
 * Common startup for the profiler's internal threads: attach to the
 * runtime, name the thread, exclude it from GC/sampling and suspension,
 * flush or send the initial buffer, and signal the attach semaphore.
 * Returns the per-thread profiler state.
 */
3058 static MonoProfilerThread *
3059 profiler_thread_begin (const char *name, gboolean send)
3061 mono_thread_info_attach ();
3062 MonoProfilerThread *thread = init_thread (FALSE);
3064 mono_thread_attach (mono_get_root_domain ());
3066 MonoInternalThread *internal = mono_thread_internal_current ();
3069 * Don't let other threads try to suspend internal profiler threads during
3070 * shutdown. This can happen if a program calls Environment.Exit () which
3071 * calls mono_thread_suspend_all_other_threads ().
3073 internal->flags |= MONO_THREAD_FLAG_DONT_MANAGE;
3075 ERROR_DECL (error);
3077 MonoString *name_str = mono_string_new_checked (mono_get_root_domain (), name, error);
3078 mono_error_assert_ok (error);
3079 mono_thread_set_name_internal (internal, name_str, FALSE, FALSE, error);
3080 mono_error_assert_ok (error);
/* profiler threads must never be sampled or stopped for GC */
3082 mono_thread_info_set_flags (MONO_THREAD_INFO_FLAGS_NO_GC | MONO_THREAD_INFO_FLAGS_NO_SAMPLE);
/* the writer thread (send == FALSE) writes its own buffer directly;
 * other threads enqueue it to the writer */
3084 if (!send) {
3085 dump_buffer (thread->buffer);
3086 init_buffer_state (thread);
3087 } else
3088 send_log_unsafe (FALSE);
3090 mono_os_sem_post (&log_profiler.attach_threads_sem);
3092 return thread;
/* Common teardown for profiler threads: flush (or hand off) the final
 * buffer, release per-thread state, and signal the exit event. */
3095 static void
3096 profiler_thread_end (MonoProfilerThread *thread, MonoOSEvent *event, gboolean send)
3098 if (send)
3099 send_log_unsafe (FALSE);
3100 else
3101 dump_buffer (thread->buffer);
3103 deinit_thread (thread);
3105 mono_os_event_set (event);
/* Detach this profiler thread from the runtime (once) when early
 * shutdown has requested it, then signal the detach semaphore. */
3108 static void
3109 profiler_thread_check_detach (MonoProfilerThread *thread)
3111 if (mono_atomic_load_i32 (&log_profiler.detach_threads) && !thread->did_detach) {
3112 thread->did_detach = TRUE;
3114 mono_thread_info_set_flags (MONO_THREAD_INFO_FLAGS_NONE);
3115 mono_thread_detach (mono_thread_current ());
3117 mono_os_sem_post (&log_profiler.detach_threads_sem);
/*
 * Add fd to the fd_set used by select (), keeping *max_fd up to date.
 * Descriptors at or beyond FD_SETSIZE cannot be represented in an
 * fd_set, and the profiler cannot operate without its basic
 * descriptors, so that case is fatal.
 */
static void
add_to_fd_set (fd_set *set, int fd, int *max_fd)
{
	if (fd >= FD_SETSIZE) {
		mono_profiler_printf_err ("File descriptor is out of bounds for fd_set: %d", fd);
		exit (1);
	}

	FD_SET (fd, set);

	if (fd > *max_fd)
		*max_fd = fd;
}
/*
 * Main loop of the helper thread: once a second (or when woken) it
 * samples counters, emits a periodic sync point, services command
 * sockets ("heapshot\n" triggers a heapshot), accepts new command
 * connections, and reacts to pipe commands (1 = shut down, 2 = detach,
 * handled via profiler_thread_check_detach ()).
 */
3141 static void *
3142 helper_thread (void *arg)
3144 MonoProfilerThread *thread = profiler_thread_begin ("Profiler Helper", TRUE);
3146 GArray *command_sockets = g_array_new (FALSE, FALSE, sizeof (int));
3148 while (1) {
3149 fd_set rfds;
3150 int max_fd = -1;
3152 FD_ZERO (&rfds);
3154 add_to_fd_set (&rfds, log_profiler.server_socket, &max_fd);
3155 add_to_fd_set (&rfds, log_profiler.pipes [0], &max_fd);
3157 for (gint i = 0; i < command_sockets->len; i++)
3158 add_to_fd_set (&rfds, g_array_index (command_sockets, int, i), &max_fd);
3160 struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
3162 // Sleep for 1sec or until a file descriptor has data.
3163 if (select (max_fd + 1, &rfds, NULL, NULL, &tv) == -1) {
3164 if (errno == EINTR)
3165 continue;
3167 mono_profiler_printf_err ("Could not poll in log profiler helper thread: %s", g_strerror (errno));
3168 exit (1);
3171 if (ENABLED (PROFLOG_COUNTER_EVENTS))
3172 counters_and_perfcounters_sample ();
3174 buffer_lock_excl ();
3176 sync_point (SYNC_POINT_PERIODIC);
3178 buffer_unlock_excl ();
3180 // Did we get a shutdown or detach signal?
3181 if (FD_ISSET (log_profiler.pipes [0], &rfds)) {
3182 char c;
/* NOTE(review): read () result is ignored here — a failed read leaves
 * c uninitialized; presumably acceptable since only 1/2 are written. */
3184 read (log_profiler.pipes [0], &c, 1);
3186 if (c == 1)
3187 break;
3190 for (gint i = 0; i < command_sockets->len; i++) {
3191 int fd = g_array_index (command_sockets, int, i);
3193 if (!FD_ISSET (fd, &rfds))
3194 continue;
3196 char buf [64];
3197 int len = read (fd, buf, sizeof (buf) - 1);
3199 if (len == -1)
3200 continue;
3202 if (!len) {
3203 // The other end disconnected.
3204 g_array_remove_index (command_sockets, i);
3205 close (fd);
3207 continue;
3210 buf [len] = 0;
3212 if (!strcmp (buf, "heapshot\n"))
3213 trigger_heapshot ();
/* accept new command clients; reject fds select () cannot handle */
3216 if (FD_ISSET (log_profiler.server_socket, &rfds)) {
3217 int fd = accept (log_profiler.server_socket, NULL, NULL);
3219 if (fd != -1) {
3220 if (fd >= FD_SETSIZE)
3221 close (fd);
3222 else
3223 g_array_append_val (command_sockets, fd);
3227 profiler_thread_check_detach (thread);
3230 for (gint i = 0; i < command_sockets->len; i++)
3231 close (g_array_index (command_sockets, int, i));
3233 g_array_free (command_sockets, TRUE);
3235 profiler_thread_end (thread, &log_profiler.helper_thread_exited, TRUE);
3237 return NULL;
/*
 * Create the wakeup pipe and the TCP command server socket (bound to
 * the configured port, or an ephemeral one which is read back into
 * command_port), then spawn the helper thread.  All failures are fatal.
 */
3241 static void
3242 start_helper_thread (void)
3243 if (pipe (log_profiler.pipes) == -1) {
3244 mono_profiler_printf_err ("Could not create log profiler pipe: %s", g_strerror (errno));
3245 exit (1);
3248 log_profiler.server_socket = socket (PF_INET, SOCK_STREAM, 0);
3250 if (log_profiler.server_socket == -1) {
3251 mono_profiler_printf_err ("Could not create log profiler server socket: %s", g_strerror (errno));
3252 exit (1);
3255 struct sockaddr_in server_address;
3257 memset (&server_address, 0, sizeof (server_address));
3258 server_address.sin_family = AF_INET;
3259 server_address.sin_addr.s_addr = INADDR_ANY;
3260 server_address.sin_port = htons (log_profiler.command_port);
3262 if (bind (log_profiler.server_socket, (struct sockaddr *) &server_address, sizeof (server_address)) == -1) {
3263 mono_profiler_printf_err ("Could not bind log profiler server socket on port %d: %s", log_profiler.command_port, g_strerror (errno));
3264 close (log_profiler.server_socket);
3265 exit (1);
3268 if (listen (log_profiler.server_socket, 1) == -1) {
3269 mono_profiler_printf_err ("Could not listen on log profiler server socket: %s", g_strerror (errno));
3270 close (log_profiler.server_socket);
3271 exit (1);
3274 socklen_t slen = sizeof (server_address);
/* read back the actual port in case 0 (ephemeral) was requested */
3276 if (getsockname (log_profiler.server_socket, (struct sockaddr *) &server_address, &slen)) {
3277 mono_profiler_printf_err ("Could not retrieve assigned port for log profiler server socket: %s", g_strerror (errno));
3278 close (log_profiler.server_socket);
3279 exit (1);
3282 log_profiler.command_port = ntohs (server_address.sin_port);
3284 if (!mono_native_thread_create (&log_profiler.helper_thread, helper_thread, NULL)) {
3285 mono_profiler_printf_err ("Could not start log profiler helper thread");
3286 close (log_profiler.server_socket);
3287 exit (1);
/* Hazard-pointer free function: return a WriterQueueEntry to its allocator. */
3291 static void
3292 free_writer_entry (gpointer p)
3294 mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
/*
 * Dequeue and process one writer queue entry: first emit JIT metadata
 * for any methods referenced by the entry's buffer (so method ids are
 * defined before use), then write the buffer itself.  Returns TRUE if
 * an entry was processed, FALSE when the queue was empty.
 */
3297 static gboolean
3298 handle_writer_queue_entry (void)
3300 WriterQueueEntry *entry;
3302 if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&log_profiler.writer_queue))) {
3303 if (!entry->methods)
3304 goto no_methods;
3306 gboolean wrote_methods = FALSE;
3309 * Encode the method events in a temporary log buffer that we
3310 * flush to disk before the main buffer, ensuring that all
3311 * methods have metadata emitted before they're referenced.
3313 * We use a 'proper' thread-local buffer for this as opposed
3314 * to allocating and freeing a buffer by hand because the call
3315 * to mono_method_full_name () below may trigger class load
3316 * events when it retrieves the signature of the method. So a
3317 * thread-local buffer needs to exist when such events occur.
3319 for (guint i = 0; i < entry->methods->len; i++) {
3320 MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);
3322 if (mono_conc_hashtable_lookup (log_profiler.method_table, info->method))
3323 goto free_info; // This method already has metadata emitted.
3326 * Other threads use this hash table to get a general
3327 * idea of whether a method has already been emitted to
3328 * the stream. Due to the way we add to this table, it
3329 * can easily happen that multiple threads queue up the
3330 * same methods, but that's OK since eventually all
3331 * methods will be in this table and the thread-local
3332 * method lists will just be empty for the rest of the
3333 * app's lifetime.
3335 mono_os_mutex_lock (&log_profiler.method_table_mutex);
3336 mono_conc_hashtable_insert (log_profiler.method_table, info->method, info->method);
3337 mono_os_mutex_unlock (&log_profiler.method_table_mutex);
3339 char *name = mono_method_full_name (info->method, 1);
3340 int nlen = strlen (name) + 1;
3341 void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
3342 int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;
3344 ENTER_LOG (&method_jits_ctr, logbuffer,
3345 EVENT_SIZE /* event */ +
3346 LEB128_SIZE /* method */ +
3347 LEB128_SIZE /* start */ +
3348 LEB128_SIZE /* size */ +
3349 nlen /* name */
3352 emit_event_time (logbuffer, TYPE_JIT | TYPE_METHOD, info->time);
3353 emit_method_inner (logbuffer, info->method);
3354 emit_ptr (logbuffer, cstart);
3355 emit_value (logbuffer, csize);
3357 memcpy (logbuffer->cursor, name, nlen);
3358 logbuffer->cursor += nlen;
/* NO_SEND: the metadata buffer is flushed explicitly below */
3360 EXIT_LOG_EXPLICIT (NO_SEND);
3362 mono_free (name);
3364 wrote_methods = TRUE;
3366 free_info:
3367 dec_method_ref_count (info->method);
3368 g_free (info);
3371 g_ptr_array_free (entry->methods, TRUE);
/* flush method metadata to disk before the entry's own buffer */
3373 if (wrote_methods) {
3374 MonoProfilerThread *thread = get_thread ();
3376 dump_buffer (thread->buffer);
3377 init_buffer_state (thread);
3380 no_methods:
3381 dump_buffer (entry->buffer);
3383 mono_thread_hazardous_try_free (entry, free_writer_entry);
3385 return TRUE;
3388 return FALSE;
/*
 * Writer thread: emits the log header, then sleeps on the queue
 * semaphore and writes entries until shutdown, draining whatever
 * remains before exiting.
 */
3391 static void *
3392 writer_thread (void *arg)
3394 dump_header ();
3396 MonoProfilerThread *thread = profiler_thread_begin ("Profiler Writer", FALSE);
3398 while (mono_atomic_load_i32 (&log_profiler.run_writer_thread)) {
3399 MONO_ENTER_GC_SAFE;
3400 mono_os_sem_wait (&log_profiler.writer_queue_sem, MONO_SEM_FLAGS_NONE);
3401 MONO_EXIT_GC_SAFE;
3402 handle_writer_queue_entry ();
3404 profiler_thread_check_detach (thread);
3407 /* Drain any remaining entries on shutdown. */
3408 while (handle_writer_queue_entry ());
3410 profiler_thread_end (thread, &log_profiler.writer_thread_exited, FALSE);
3412 return NULL;
/* Spawn the writer thread; failure to create it is fatal. */
3415 static void
3416 start_writer_thread (void)
3418 mono_atomic_store_i32 (&log_profiler.run_writer_thread, 1);
3420 if (!mono_native_thread_create (&log_profiler.writer_thread, writer_thread, NULL)) {
3421 mono_profiler_printf_err ("Could not start log profiler writer thread");
3422 exit (1);
/* Hazard-pointer free function: recycle a SampleHit into the reuse
 * queue instead of freeing it. */
3426 static void
3427 reuse_sample_hit (gpointer p)
3429 SampleHit *sample = p;
3431 mono_lock_free_queue_node_unpoison (&sample->node);
3432 mono_lock_free_queue_enqueue (&log_profiler.sample_reuse_queue, &sample->node);
/*
 * Dequeue one sample hit, resolve its unmanaged frames to methods via
 * the JIT info table, emit a TYPE_SAMPLE_HIT event, and recycle the
 * sample.  Returns TRUE if a sample was processed, FALSE if the queue
 * was empty.  Also dumps unmanaged code refs after each sample.
 */
3435 static gboolean
3436 handle_dumper_queue_entry (void)
3438 SampleHit *sample;
3440 if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.dumper_queue))) {
3441 for (int i = 0; i < sample->count; ++i) {
3442 MonoMethod *method = sample->frames [i].method;
3443 MonoDomain *domain = sample->frames [i].domain;
3444 void *address = sample->frames [i].base_address;
/* frames captured without a method are looked up by address */
3446 if (!method) {
3447 g_assert (domain && "What happened to the domain pointer?");
3448 g_assert (address && "What happened to the instruction pointer?");
3450 MonoJitInfo *ji = mono_jit_info_table_find (domain, address);
3452 if (ji)
3453 method = mono_jit_info_get_method (ji);
/* pin the method so it cannot be freed while referenced below */
3455 if (method)
3456 inc_method_ref_count (method);
3458 sample->frames [i].method = method;
3462 ENTER_LOG (&sample_hits_ctr, logbuffer,
3463 EVENT_SIZE /* event */ +
3464 LEB128_SIZE /* tid */ +
3465 LEB128_SIZE /* count */ +
3466 1 * (
3467 LEB128_SIZE /* ip */
3469 LEB128_SIZE /* managed count */ +
3470 sample->count * (
3471 LEB128_SIZE /* method */
3475 emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
3476 emit_ptr (logbuffer, (void *) sample->tid);
3477 emit_value (logbuffer, 1);
3479 // TODO: Actual native unwinding.
3480 for (int i = 0; i < 1; ++i) {
3481 emit_ptr (logbuffer, sample->ip);
3482 add_code_pointer ((uintptr_t) sample->ip);
3485 /* new in data version 6 */
3486 emit_uvalue (logbuffer, sample->count);
3488 for (int i = 0; i < sample->count; ++i)
3489 emit_method (logbuffer, sample->frames [i].method);
3491 EXIT_LOG;
/* release the method pins taken above */
3493 for (int i = 0; i < sample->count; ++i) {
3494 MonoMethod *method = sample->frames [i].method;
3496 if (method)
3497 dec_method_ref_count (method);
3500 mono_thread_hazardous_try_free (sample, reuse_sample_hit);
3502 dump_unmanaged_coderefs ();
3505 return FALSE;
/*
 * Dumper thread: waits (with a 1 s timeout) for sample hits, flushing
 * its buffer on timeout so output keeps flowing when the app is idle,
 * and drains the queue on shutdown.
 */
3508 static void *
3509 dumper_thread (void *arg)
3511 MonoProfilerThread *thread = profiler_thread_begin ("Profiler Dumper", TRUE);
3513 while (mono_atomic_load_i32 (&log_profiler.run_dumper_thread)) {
3514 gboolean timedout = FALSE;
3515 MONO_ENTER_GC_SAFE;
3517 * Flush samples every second so it doesn't seem like the profiler is
3518 * not working if the program is mostly idle.
3520 timedout = mono_os_sem_timedwait (&log_profiler.dumper_queue_sem, 1000, MONO_SEM_FLAGS_NONE) == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT;
3521 MONO_EXIT_GC_SAFE;
3522 if (timedout)
3523 send_log_unsafe (FALSE);
3525 handle_dumper_queue_entry ();
3527 profiler_thread_check_detach (thread);
3530 /* Drain any remaining entries on shutdown. */
3531 while (handle_dumper_queue_entry ());
3533 profiler_thread_end (thread, &log_profiler.dumper_thread_exited, TRUE);
3535 return NULL;
/* Spawn the dumper thread; failure to create it is fatal. */
3538 static void
3539 start_dumper_thread (void)
3541 mono_atomic_store_i32 (&log_profiler.run_dumper_thread, 1);
3543 if (!mono_native_thread_create (&log_profiler.dumper_thread, dumper_thread, NULL)) {
3544 mono_profiler_printf_err ("Could not start log profiler dumper thread");
3545 exit (1);
/* Register one of the profiler's own event counters with the runtime's
 * counter subsystem (unsigned, monotonic, profiler section). */
3549 static void
3550 register_counter (const char *name, gint32 *counter)
3552 mono_counters_register (name, MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, counter);
3555 #ifdef __GNUC__
3556 #pragma GCC diagnostic push
3557 #pragma GCC diagnostic ignored "-Wmissing-prototypes"
3558 #endif
/* Icall: compile-time upper bound on captured stack frames. */
3560 ICALL_EXPORT gint32
3561 proflog_icall_GetMaxStackTraceFrames (void)
3563 return MAX_FRAMES;
/* Icall: current number of stack frames captured per event. */
3566 ICALL_EXPORT gint32
3567 proflog_icall_GetStackTraceFrames (void)
3569 return log_config.num_frames;
/* Icall: set the number of stack frames captured per event. */
3572 ICALL_EXPORT void
3573 proflog_icall_SetStackTraceFrames (gint32 value)
3575 log_config.num_frames = value;
/* Icall: current heapshot triggering mode. */
3578 ICALL_EXPORT MonoProfilerHeapshotMode
3579 proflog_icall_GetHeapshotMode (void)
3581 return log_config.hs_mode;
/* Icall: set the heapshot triggering mode. */
3584 ICALL_EXPORT void
3585 proflog_icall_SetHeapshotMode (MonoProfilerHeapshotMode value)
3587 log_config.hs_mode = value;
/* Icall: heapshot frequency in milliseconds (time-based mode). */
3590 ICALL_EXPORT gint32
3591 proflog_icall_GetHeapshotMillisecondsFrequency (void)
3593 return log_config.hs_freq_ms;
/* Icall: set heapshot frequency in milliseconds (time-based mode). */
3596 ICALL_EXPORT void
3597 proflog_icall_SetHeapshotMillisecondsFrequency (gint32 value)
3599 log_config.hs_freq_ms = value;
/* Icall: heapshot frequency in GC collections (collection-based mode). */
3602 ICALL_EXPORT gint32
3603 proflog_icall_GetHeapshotCollectionsFrequency (void)
3605 return log_config.hs_freq_gc;
/* Icall: set heapshot frequency in GC collections (collection-based mode). */
3608 ICALL_EXPORT void
3609 proflog_icall_SetHeapshotCollectionsFrequency (gint32 value)
3611 log_config.hs_freq_gc = value;
/* Icall: request an on-demand heapshot. */
3614 ICALL_EXPORT void
3615 proflog_icall_TriggerHeapshot (void)
3617 trigger_heapshot ();
/* Icall: current maximum call depth recorded for enter/leave events. */
3620 ICALL_EXPORT gint32
3621 proflog_icall_GetCallDepth (void)
3623 return log_config.max_call_depth;
/* Icall: set the maximum call depth recorded for enter/leave events. */
3626 ICALL_EXPORT void
3627 proflog_icall_SetCallDepth (gint32 value)
3629 log_config.max_call_depth = value;
/* Icall: fetch the current sampling mode and frequency from the
 * profiler API (frequency widened through a local to match types). */
3632 ICALL_EXPORT void
3633 proflog_icall_GetSampleMode (MonoProfilerSampleMode *mode, gint32 *frequency)
3635 uint32_t freq;
3637 mono_profiler_get_sample_mode (log_profiler.handle, mode, &freq);
3639 *frequency = freq;
3642 ICALL_EXPORT MonoBoolean
3643 proflog_icall_SetSampleMode (MonoProfilerSampleMode mode, gint32 frequency)
3645 mono_coop_mutex_lock (&log_profiler.api_mutex);
3647 mono_bool result = mono_profiler_set_sample_mode (log_profiler.handle, mode, frequency);
3649 if (mode != MONO_PROFILER_SAMPLE_MODE_NONE) {
3650 ENABLE (PROFLOG_SAMPLE_EVENTS);
3651 mono_profiler_set_sample_hit_callback (log_profiler.handle, mono_sample_hit);
3652 } else {
3653 DISABLE (PROFLOG_SAMPLE_EVENTS);
3654 mono_profiler_set_sample_hit_callback (log_profiler.handle, NULL);
3657 mono_coop_mutex_unlock (&log_profiler.api_mutex);
3659 return result;
3662 ICALL_EXPORT MonoBoolean
3663 proflog_icall_GetExceptionEvents (void)
3665 return ENABLED (PROFLOG_EXCEPTION_EVENTS);
3668 ICALL_EXPORT void
3669 proflog_icall_SetExceptionEvents (MonoBoolean value)
3671 mono_coop_mutex_lock (&log_profiler.api_mutex);
3673 if (value) {
3674 ENABLE (PROFLOG_EXCEPTION_EVENTS);
3675 mono_profiler_set_exception_throw_callback (log_profiler.handle, throw_exc);
3676 mono_profiler_set_exception_clause_callback (log_profiler.handle, clause_exc);
3677 } else {
3678 DISABLE (PROFLOG_EXCEPTION_EVENTS);
3679 mono_profiler_set_exception_throw_callback (log_profiler.handle, NULL);
3680 mono_profiler_set_exception_clause_callback (log_profiler.handle, NULL);
3683 mono_coop_mutex_unlock (&log_profiler.api_mutex);
3686 ICALL_EXPORT MonoBoolean
3687 proflog_icall_GetMonitorEvents (void)
3689 return ENABLED (PROFLOG_MONITOR_EVENTS);
3692 ICALL_EXPORT void
3693 proflog_icall_SetMonitorEvents (MonoBoolean value)
3695 mono_coop_mutex_lock (&log_profiler.api_mutex);
3697 if (value) {
3698 ENABLE (PROFLOG_MONITOR_EVENTS);
3699 mono_profiler_set_monitor_contention_callback (log_profiler.handle, monitor_contention);
3700 mono_profiler_set_monitor_acquired_callback (log_profiler.handle, monitor_acquired);
3701 mono_profiler_set_monitor_failed_callback (log_profiler.handle, monitor_failed);
3702 } else {
3703 DISABLE (PROFLOG_MONITOR_EVENTS);
3704 mono_profiler_set_monitor_contention_callback (log_profiler.handle, NULL);
3705 mono_profiler_set_monitor_acquired_callback (log_profiler.handle, NULL);
3706 mono_profiler_set_monitor_failed_callback (log_profiler.handle, NULL);
3709 mono_coop_mutex_unlock (&log_profiler.api_mutex);
3712 ICALL_EXPORT MonoBoolean
3713 proflog_icall_GetGCEvents (void)
3715 return ENABLED (PROFLOG_GC_EVENTS);
3718 ICALL_EXPORT void
3719 proflog_icall_SetGCEvents (MonoBoolean value)
3721 mono_coop_mutex_lock (&log_profiler.api_mutex);
3723 if (value)
3724 ENABLE (PROFLOG_GC_EVENTS);
3725 else
3726 DISABLE (PROFLOG_GC_EVENTS);
3728 mono_coop_mutex_unlock (&log_profiler.api_mutex);
3731 ICALL_EXPORT MonoBoolean
3732 proflog_icall_GetGCAllocationEvents (void)
3734 return ENABLED (PROFLOG_GC_ALLOCATION_EVENTS);
3737 ICALL_EXPORT void
3738 proflog_icall_SetGCAllocationEvents (MonoBoolean value)
3740 mono_coop_mutex_lock (&log_profiler.api_mutex);
3742 if (value) {
3743 ENABLE (PROFLOG_GC_ALLOCATION_EVENTS);
3744 mono_profiler_set_gc_allocation_callback (log_profiler.handle, gc_alloc);
3745 } else {
3746 DISABLE (PROFLOG_GC_ALLOCATION_EVENTS);
3747 mono_profiler_set_gc_allocation_callback (log_profiler.handle, NULL);
3750 mono_coop_mutex_unlock (&log_profiler.api_mutex);
3753 ICALL_EXPORT MonoBoolean
3754 proflog_icall_GetGCMoveEvents (void)
3756 return ENABLED (PROFLOG_GC_MOVE_EVENTS);
3759 ICALL_EXPORT void
3760 proflog_icall_SetGCMoveEvents (MonoBoolean value)
3762 mono_coop_mutex_lock (&log_profiler.api_mutex);
3764 if (value) {
3765 ENABLE (PROFLOG_GC_MOVE_EVENTS);
3766 mono_profiler_set_gc_moves_callback (log_profiler.handle, gc_moves);
3767 } else {
3768 DISABLE (PROFLOG_GC_MOVE_EVENTS);
3769 mono_profiler_set_gc_moves_callback (log_profiler.handle, NULL);
3772 mono_coop_mutex_unlock (&log_profiler.api_mutex);
3775 ICALL_EXPORT MonoBoolean
3776 proflog_icall_GetGCRootEvents (void)
3778 return ENABLED (PROFLOG_GC_ROOT_EVENTS);
3781 ICALL_EXPORT void
3782 proflog_icall_SetGCRootEvents (MonoBoolean value)
3784 mono_coop_mutex_lock (&log_profiler.api_mutex);
3786 if (value)
3787 ENABLE (PROFLOG_GC_ROOT_EVENTS);
3788 else
3789 DISABLE (PROFLOG_GC_ROOT_EVENTS);
3791 mono_coop_mutex_unlock (&log_profiler.api_mutex);
3794 ICALL_EXPORT MonoBoolean
3795 proflog_icall_GetGCHandleEvents (void)
3797 return ENABLED (PROFLOG_GC_HANDLE_EVENTS);
3800 ICALL_EXPORT void
3801 proflog_icall_SetGCHandleEvents (MonoBoolean value)
3803 mono_coop_mutex_lock (&log_profiler.api_mutex);
3805 if (value) {
3806 ENABLE (PROFLOG_GC_HANDLE_EVENTS);
3807 mono_profiler_set_gc_handle_created_callback (log_profiler.handle, gc_handle_created);
3808 mono_profiler_set_gc_handle_deleted_callback (log_profiler.handle, gc_handle_deleted);
3809 } else {
3810 DISABLE (PROFLOG_GC_HANDLE_EVENTS);
3811 mono_profiler_set_gc_handle_created_callback (log_profiler.handle, NULL);
3812 mono_profiler_set_gc_handle_deleted_callback (log_profiler.handle, NULL);
3815 mono_coop_mutex_unlock (&log_profiler.api_mutex);
3818 ICALL_EXPORT MonoBoolean
3819 proflog_icall_GetGCFinalizationEvents (void)
3821 return ENABLED (PROFLOG_GC_FINALIZATION_EVENTS);
3824 ICALL_EXPORT void
3825 proflog_icall_SetGCFinalizationEvents (MonoBoolean value)
3827 mono_coop_mutex_lock (&log_profiler.api_mutex);
3829 if (value) {
3830 ENABLE (PROFLOG_GC_FINALIZATION_EVENTS);
3831 mono_profiler_set_gc_finalizing_callback (log_profiler.handle, finalize_begin);
3832 mono_profiler_set_gc_finalizing_object_callback (log_profiler.handle, finalize_object_begin);
3833 mono_profiler_set_gc_finalized_object_callback (log_profiler.handle, finalize_object_end);
3834 } else {
3835 DISABLE (PROFLOG_GC_FINALIZATION_EVENTS);
3836 mono_profiler_set_gc_finalizing_callback (log_profiler.handle, NULL);
3837 mono_profiler_set_gc_finalizing_object_callback (log_profiler.handle, NULL);
3838 mono_profiler_set_gc_finalized_object_callback (log_profiler.handle, NULL);
3841 mono_coop_mutex_unlock (&log_profiler.api_mutex);
3844 ICALL_EXPORT MonoBoolean
3845 proflog_icall_GetCounterEvents (void)
3847 return ENABLED (PROFLOG_COUNTER_EVENTS);
3850 ICALL_EXPORT void
3851 proflog_icall_SetCounterEvents (MonoBoolean value)
3853 mono_coop_mutex_lock (&log_profiler.api_mutex);
3855 if (value)
3856 ENABLE (PROFLOG_COUNTER_EVENTS);
3857 else
3858 DISABLE (PROFLOG_COUNTER_EVENTS);
3860 mono_coop_mutex_unlock (&log_profiler.api_mutex);
3863 ICALL_EXPORT MonoBoolean
3864 proflog_icall_GetJitEvents (void)
3866 return ENABLED (PROFLOG_JIT_EVENTS);
3869 ICALL_EXPORT void
3870 proflog_icall_SetJitEvents (MonoBoolean value)
3872 mono_coop_mutex_lock (&log_profiler.api_mutex);
3874 if (value) {
3875 ENABLE (PROFLOG_JIT_EVENTS);
3876 mono_profiler_set_jit_code_buffer_callback (log_profiler.handle, code_buffer_new);
3877 } else {
3878 DISABLE (PROFLOG_JIT_EVENTS);
3879 mono_profiler_set_jit_code_buffer_callback (log_profiler.handle, NULL);
3882 mono_coop_mutex_unlock (&log_profiler.api_mutex);
3885 #ifdef __GNUC__
3886 #pragma GCC diagnostic pop
3887 #endif
3889 static void
3890 runtime_initialized (MonoProfiler *profiler)
3892 mono_atomic_store_i32 (&log_profiler.runtime_inited, 1);
3894 register_counter ("Sample events allocated", &sample_allocations_ctr);
3895 register_counter ("Log buffers allocated", &buffer_allocations_ctr);
3897 register_counter ("Event: Sync points", &sync_points_ctr);
3898 register_counter ("Event: AOT IDs", &aot_ids_ctr);
3899 register_counter ("Event: Heap objects", &heap_objects_ctr);
3900 register_counter ("Event: Heap starts", &heap_starts_ctr);
3901 register_counter ("Event: Heap ends", &heap_ends_ctr);
3902 register_counter ("Event: Heap roots", &heap_roots_ctr);
3903 register_counter ("Event: Heap root registers", &heap_root_registers_ctr);
3904 register_counter ("Event: Heap root unregisters", &heap_root_unregisters_ctr);
3905 register_counter ("Event: GC events", &gc_events_ctr);
3906 register_counter ("Event: GC resizes", &gc_resizes_ctr);
3907 register_counter ("Event: GC allocations", &gc_allocs_ctr);
3908 register_counter ("Event: GC moves", &gc_moves_ctr);
3909 register_counter ("Event: GC handle creations", &gc_handle_creations_ctr);
3910 register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr);
3911 register_counter ("Event: GC finalize starts", &finalize_begins_ctr);
3912 register_counter ("Event: GC finalize ends", &finalize_ends_ctr);
3913 register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr);
3914 register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr);
3915 register_counter ("Event: Image loads", &image_loads_ctr);
3916 register_counter ("Event: Image unloads", &image_unloads_ctr);
3917 register_counter ("Event: Assembly loads", &assembly_loads_ctr);
3918 register_counter ("Event: Assembly unloads", &assembly_unloads_ctr);
3919 register_counter ("Event: Class loads", &class_loads_ctr);
3920 register_counter ("Event: VTable loads", &vtable_loads_ctr);
3921 register_counter ("Event: Method entries", &method_entries_ctr);
3922 register_counter ("Event: Method exits", &method_exits_ctr);
3923 register_counter ("Event: Method exception leaves", &method_exception_exits_ctr);
3924 register_counter ("Event: Method JITs", &method_jits_ctr);
3925 register_counter ("Event: Code buffers", &code_buffers_ctr);
3926 register_counter ("Event: Exception throws", &exception_throws_ctr);
3927 register_counter ("Event: Exception clauses", &exception_clauses_ctr);
3928 register_counter ("Event: Monitor events", &monitor_events_ctr);
3929 register_counter ("Event: Thread starts", &thread_starts_ctr);
3930 register_counter ("Event: Thread ends", &thread_ends_ctr);
3931 register_counter ("Event: Thread names", &thread_names_ctr);
3932 register_counter ("Event: Domain loads", &domain_loads_ctr);
3933 register_counter ("Event: Domain unloads", &domain_unloads_ctr);
3934 register_counter ("Event: Domain names", &domain_names_ctr);
3935 register_counter ("Event: Context loads", &context_loads_ctr);
3936 register_counter ("Event: Context unloads", &context_unloads_ctr);
3937 register_counter ("Event: Sample binaries", &sample_ubins_ctr);
3938 register_counter ("Event: Sample symbols", &sample_usyms_ctr);
3939 register_counter ("Event: Sample hits", &sample_hits_ctr);
3940 register_counter ("Event: Counter descriptors", &counter_descriptors_ctr);
3941 register_counter ("Event: Counter samples", &counter_samples_ctr);
3942 register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr);
3943 register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr);
3945 counters_init ();
3947 mono_os_sem_init (&log_profiler.attach_threads_sem, 0);
3950 * We must start the helper thread before the writer thread. This is
3951 * because start_helper_thread () sets up the command port which is written
3952 * to the log header by the writer thread.
3954 start_helper_thread ();
3955 start_writer_thread ();
3956 start_dumper_thread ();
3959 * Wait for all the internal threads to be started. If we don't do this, we
3960 * might shut down before they finish initializing, which could lead to
3961 * various deadlocks when waiting for them to exit during shutdown.
3963 for (int i = 0; i < 3; i++)
3964 mono_os_sem_wait (&log_profiler.attach_threads_sem, MONO_SEM_FLAGS_NONE);
3966 mono_os_sem_destroy (&log_profiler.attach_threads_sem);
3968 mono_coop_mutex_init (&log_profiler.api_mutex);
3970 #define ADD_ICALL(NAME) \
3971 mono_add_internal_call_internal ("Mono.Profiler.Log.LogProfiler::" EGLIB_STRINGIFY (NAME), proflog_icall_ ## NAME);
3973 ADD_ICALL (GetMaxStackTraceFrames);
3974 ADD_ICALL (GetStackTraceFrames);
3975 ADD_ICALL (SetStackTraceFrames);
3976 ADD_ICALL (GetHeapshotMode);
3977 ADD_ICALL (SetHeapshotMode);
3978 ADD_ICALL (GetHeapshotMillisecondsFrequency);
3979 ADD_ICALL (SetHeapshotMillisecondsFrequency);
3980 ADD_ICALL (GetHeapshotCollectionsFrequency);
3981 ADD_ICALL (SetHeapshotCollectionsFrequency);
3982 ADD_ICALL (TriggerHeapshot);
3983 ADD_ICALL (GetCallDepth);
3984 ADD_ICALL (SetCallDepth);
3985 ADD_ICALL (GetSampleMode);
3986 ADD_ICALL (SetSampleMode);
3987 ADD_ICALL (GetExceptionEvents);
3988 ADD_ICALL (SetExceptionEvents);
3989 ADD_ICALL (GetMonitorEvents);
3990 ADD_ICALL (SetMonitorEvents);
3991 ADD_ICALL (GetGCEvents);
3992 ADD_ICALL (SetGCEvents);
3993 ADD_ICALL (GetGCAllocationEvents);
3994 ADD_ICALL (SetGCAllocationEvents);
3995 ADD_ICALL (GetGCMoveEvents);
3996 ADD_ICALL (SetGCMoveEvents);
3997 ADD_ICALL (GetGCRootEvents);
3998 ADD_ICALL (SetGCRootEvents);
3999 ADD_ICALL (GetGCHandleEvents);
4000 ADD_ICALL (SetGCHandleEvents);
4001 ADD_ICALL (GetGCFinalizationEvents);
4002 ADD_ICALL (SetGCFinalizationEvents);
4003 ADD_ICALL (GetCounterEvents);
4004 ADD_ICALL (SetCounterEvents);
4005 ADD_ICALL (GetJitEvents);
4006 ADD_ICALL (SetJitEvents);
4008 #undef ADD_ICALL
4011 static void
4012 create_profiler (const char *args, const char *filename, GPtrArray *filters)
4014 char *nf;
4016 log_profiler.args = pstrdup (args);
4017 log_profiler.command_port = log_config.command_port;
4019 //If filename begin with +, append the pid at the end
4020 if (filename && *filename == '+')
4021 filename = g_strdup_printf ("%s.%d", filename + 1, getpid ());
4023 if (!filename) {
4024 if (log_config.do_report)
4025 filename = "|mprof-report -";
4026 else
4027 filename = "output.mlpd";
4028 nf = (char*)filename;
4029 } else {
4030 nf = new_filename (filename);
4031 if (log_config.do_report) {
4032 int s = strlen (nf) + 32;
4033 char *p = (char *) g_malloc (s);
4034 snprintf (p, s, "|mprof-report '--out=%s' -", nf);
4035 g_free (nf);
4036 nf = p;
4039 if (*nf == '|') {
4040 log_profiler.file = popen (nf + 1, "w");
4041 log_profiler.pipe_output = 1;
4042 } else if (*nf == '#') {
4043 int fd = strtol (nf + 1, NULL, 10);
4044 log_profiler.file = fdopen (fd, "a");
4045 } else
4046 log_profiler.file = fopen (nf, "wb");
4048 if (!log_profiler.file) {
4049 mono_profiler_printf_err ("Could not create log profiler output file '%s': %s", nf, g_strerror (errno));
4050 exit (1);
4053 #if defined (HAVE_SYS_ZLIB)
4054 if (log_config.use_zip)
4055 log_profiler.gzfile = gzdopen (fileno (log_profiler.file), "wb");
4056 #endif
4058 // FIXME: We should free this stuff too.
4059 mono_lock_free_allocator_init_size_class (&log_profiler.sample_size_class, SAMPLE_SLOT_SIZE (log_config.num_frames), SAMPLE_BLOCK_SIZE);
4060 mono_lock_free_allocator_init_allocator (&log_profiler.sample_allocator, &log_profiler.sample_size_class, MONO_MEM_ACCOUNT_PROFILER);
4062 mono_lock_free_queue_init (&log_profiler.sample_reuse_queue);
4064 // FIXME: We should free this stuff too.
4065 mono_lock_free_allocator_init_size_class (&log_profiler.writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
4066 mono_lock_free_allocator_init_allocator (&log_profiler.writer_entry_allocator, &log_profiler.writer_entry_size_class, MONO_MEM_ACCOUNT_PROFILER);
4068 mono_os_event_init (&log_profiler.helper_thread_exited, FALSE);
4070 mono_os_event_init (&log_profiler.writer_thread_exited, FALSE);
4071 mono_lock_free_queue_init (&log_profiler.writer_queue);
4072 mono_os_sem_init (&log_profiler.writer_queue_sem, 0);
4074 mono_os_event_init (&log_profiler.dumper_thread_exited, FALSE);
4075 mono_lock_free_queue_init (&log_profiler.dumper_queue);
4076 mono_os_sem_init (&log_profiler.dumper_queue_sem, 0);
4078 mono_os_mutex_init (&log_profiler.method_table_mutex);
4079 log_profiler.method_table = mono_conc_hashtable_new (NULL, NULL);
4081 log_profiler.startup_time = current_time ();
4084 MONO_API void
4085 mono_profiler_init_log (const char *desc);
4087 void
4088 mono_profiler_init_log (const char *desc)
4091 * If you hit this assert while increasing MAX_FRAMES, you need to increase
4092 * SAMPLE_BLOCK_SIZE as well.
4094 g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));
4095 g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));
4097 GPtrArray *filters = NULL;
4099 proflog_parse_args (&log_config, desc [3] == ':' ? desc + 4 : "");
4101 MonoProfilerHandle handle = log_profiler.handle = mono_profiler_create (&log_profiler);
4103 if (log_config.enter_leave)
4104 mono_profiler_set_call_instrumentation_filter_callback (handle, method_filter);
4107 * If the runtime was invoked for the purpose of AOT compilation only, the
4108 * only thing we want to do is install the call instrumentation filter.
4110 if (mono_jit_aot_compiling ())
4111 goto done;
4113 init_time ();
4115 create_profiler (desc, log_config.output_filename, filters);
4117 mono_lls_init (&log_profiler.profiler_thread_list, NULL);
4120 * Required callbacks. These are either necessary for the profiler itself
4121 * to function, or provide metadata that's needed if other events (e.g.
4122 * allocations, exceptions) are dynamically enabled/disabled.
4125 mono_profiler_set_runtime_shutdown_begin_callback (handle, log_early_shutdown);
4126 mono_profiler_set_runtime_shutdown_end_callback (handle, log_shutdown);
4127 mono_profiler_set_runtime_initialized_callback (handle, runtime_initialized);
4129 mono_profiler_set_gc_event_callback (handle, gc_event);
4131 mono_profiler_set_thread_started_callback (handle, thread_start);
4132 mono_profiler_set_thread_exited_callback (handle, thread_end);
4133 mono_profiler_set_thread_name_callback (handle, thread_name);
4135 mono_profiler_set_domain_loaded_callback (handle, domain_loaded);
4136 mono_profiler_set_domain_unloading_callback (handle, domain_unloaded);
4137 mono_profiler_set_domain_name_callback (handle, domain_name);
4139 mono_profiler_set_context_loaded_callback (handle, context_loaded);
4140 mono_profiler_set_context_unloaded_callback (handle, context_unloaded);
4142 mono_profiler_set_assembly_loaded_callback (handle, assembly_loaded);
4143 mono_profiler_set_assembly_unloading_callback (handle, assembly_unloaded);
4145 mono_profiler_set_image_loaded_callback (handle, image_loaded);
4146 mono_profiler_set_image_unloading_callback (handle, image_unloaded);
4148 mono_profiler_set_class_loaded_callback (handle, class_loaded);
4150 mono_profiler_set_vtable_loaded_callback (handle, vtable_loaded);
4152 mono_profiler_set_jit_done_callback (handle, method_jitted);
4154 mono_profiler_set_gc_root_register_callback (handle, gc_root_register);
4155 mono_profiler_set_gc_root_unregister_callback (handle, gc_root_deregister);
4157 if (ENABLED (PROFLOG_EXCEPTION_EVENTS)) {
4158 mono_profiler_set_exception_throw_callback (handle, throw_exc);
4159 mono_profiler_set_exception_clause_callback (handle, clause_exc);
4162 if (ENABLED (PROFLOG_MONITOR_EVENTS)) {
4163 mono_profiler_set_monitor_contention_callback (handle, monitor_contention);
4164 mono_profiler_set_monitor_acquired_callback (handle, monitor_acquired);
4165 mono_profiler_set_monitor_failed_callback (handle, monitor_failed);
4168 if (ENABLED (PROFLOG_GC_EVENTS))
4169 mono_profiler_set_gc_resize_callback (handle, gc_resize);
4171 if (ENABLED (PROFLOG_GC_ALLOCATION_EVENTS))
4172 mono_profiler_set_gc_allocation_callback (handle, gc_alloc);
4174 if (ENABLED (PROFLOG_GC_MOVE_EVENTS))
4175 mono_profiler_set_gc_moves_callback (handle, gc_moves);
4177 if (ENABLED (PROFLOG_GC_HANDLE_EVENTS)) {
4178 mono_profiler_set_gc_handle_created_callback (handle, gc_handle_created);
4179 mono_profiler_set_gc_handle_deleted_callback (handle, gc_handle_deleted);
4182 if (ENABLED (PROFLOG_GC_FINALIZATION_EVENTS)) {
4183 mono_profiler_set_gc_finalizing_callback (handle, finalize_begin);
4184 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4185 mono_profiler_set_gc_finalizing_object_callback (handle, finalize_object_begin);
4186 mono_profiler_set_gc_finalized_object_callback (handle, finalize_object_end);
4189 //On Demand heapshot uses the finalizer thread to force a collection and thus a heapshot
4190 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4192 if (ENABLED (PROFLOG_SAMPLE_EVENTS))
4193 mono_profiler_set_sample_hit_callback (handle, mono_sample_hit);
4195 if (ENABLED (PROFLOG_JIT_EVENTS))
4196 mono_profiler_set_jit_code_buffer_callback (handle, code_buffer_new);
4198 if (log_config.enter_leave) {
4199 mono_profiler_set_method_enter_callback (handle, method_enter);
4200 mono_profiler_set_method_leave_callback (handle, method_leave);
4201 mono_profiler_set_method_tail_call_callback (handle, tailcall);
4202 mono_profiler_set_method_exception_leave_callback (handle, method_exc_leave);
4205 mono_profiler_enable_allocations ();
4206 mono_profiler_enable_clauses ();
4207 mono_profiler_enable_sampling (handle);
4210 * If no sample option was given by the user, this just leaves the sampling
4211 * thread in idle mode. We do this even if no option was given so that we
4212 * can warn if another profiler controls sampling parameters.
4214 if (!mono_profiler_set_sample_mode (handle, log_config.sampling_mode, log_config.sample_freq))
4215 mono_profiler_printf_err ("Another profiler controls sampling parameters; the log profiler will not be able to modify them.");
4217 done: