[profiler] Implement call context introspection for enter/leave events.
[mono-project.git] / mono / profiler / log.c
blob 90f7309884796a55ac57dfb19aa89a76a192bffd
1 /*
2 * log.c: mono log profiler
4 * Authors:
5 * Paolo Molaro (lupus@ximian.com)
6 * Alex Rønne Petersen (alexrp@xamarin.com)
8 * Copyright 2010 Novell, Inc (http://www.novell.com)
9 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
10 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
11 */
13 #include <config.h>
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/class-internals.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/metadata/loader.h>
18 #include <mono/metadata/metadata-internals.h>
19 #include <mono/metadata/mono-config.h>
20 #include <mono/metadata/mono-gc.h>
21 #include <mono/metadata/mono-perfcounters.h>
22 #include <mono/metadata/tabledefs.h>
23 #include <mono/utils/atomic.h>
24 #include <mono/utils/hazard-pointer.h>
25 #include <mono/utils/lock-free-alloc.h>
26 #include <mono/utils/lock-free-queue.h>
27 #include <mono/utils/mono-conc-hashtable.h>
28 #include <mono/utils/mono-coop-mutex.h>
29 #include <mono/utils/mono-counters.h>
30 #include <mono/utils/mono-logger-internals.h>
31 #include <mono/utils/mono-linked-list-set.h>
32 #include <mono/utils/mono-membar.h>
33 #include <mono/utils/mono-mmap.h>
34 #include <mono/utils/mono-os-mutex.h>
35 #include <mono/utils/mono-os-semaphore.h>
36 #include <mono/utils/mono-threads.h>
37 #include <mono/utils/mono-threads-api.h>
38 #include "log.h"
40 #ifdef HAVE_DLFCN_H
41 #include <dlfcn.h>
42 #endif
43 #include <fcntl.h>
44 #ifdef HAVE_LINK_H
45 #include <link.h>
46 #endif
47 #ifdef HAVE_UNISTD_H
48 #include <unistd.h>
49 #endif
50 #if defined(__APPLE__)
51 #include <mach/mach_time.h>
52 #endif
53 #include <netinet/in.h>
54 #ifdef HAVE_SYS_MMAN_H
55 #include <sys/mman.h>
56 #endif
57 #include <sys/socket.h>
58 #if defined (HAVE_SYS_ZLIB)
59 #include <zlib.h>
60 #endif
62 // Statistics for internal profiler data structures.
63 static gint32 sample_allocations_ctr,
64 buffer_allocations_ctr;
66 // Statistics for profiler events.
67 static gint32 sync_points_ctr,
68 heap_objects_ctr,
69 heap_starts_ctr,
70 heap_ends_ctr,
71 heap_roots_ctr,
72 gc_events_ctr,
73 gc_resizes_ctr,
74 gc_allocs_ctr,
75 gc_moves_ctr,
76 gc_handle_creations_ctr,
77 gc_handle_deletions_ctr,
78 finalize_begins_ctr,
79 finalize_ends_ctr,
80 finalize_object_begins_ctr,
81 finalize_object_ends_ctr,
82 image_loads_ctr,
83 image_unloads_ctr,
84 assembly_loads_ctr,
85 assembly_unloads_ctr,
86 class_loads_ctr,
87 class_unloads_ctr,
88 method_entries_ctr,
89 method_exits_ctr,
90 method_exception_exits_ctr,
91 method_jits_ctr,
92 code_buffers_ctr,
93 exception_throws_ctr,
94 exception_clauses_ctr,
95 monitor_events_ctr,
96 thread_starts_ctr,
97 thread_ends_ctr,
98 thread_names_ctr,
99 domain_loads_ctr,
100 domain_unloads_ctr,
101 domain_names_ctr,
102 context_loads_ctr,
103 context_unloads_ctr,
104 sample_ubins_ctr,
105 sample_usyms_ctr,
106 sample_hits_ctr,
107 counter_descriptors_ctr,
108 counter_samples_ctr,
109 perfcounter_descriptors_ctr,
110 perfcounter_samples_ctr,
111 coverage_methods_ctr,
112 coverage_statements_ctr,
113 coverage_classes_ctr,
114 coverage_assemblies_ctr;
116 // Pending data to be written to the log, for a single thread.
117 // Threads periodically flush their own LogBuffers by calling send_log_unsafe ().
118 typedef struct _LogBuffer LogBuffer;
119 struct _LogBuffer {
120 // Next (older) LogBuffer in processing queue
121 LogBuffer *next;
123 uint64_t time_base;
124 uint64_t last_time;
125 uintptr_t ptr_base;
126 uintptr_t method_base;
127 uintptr_t last_method;
128 uintptr_t obj_base;
129 uintptr_t thread_id;
131 // Bytes allocated for this LogBuffer
132 int size;
134 // Start of currently unused space in buffer
135 unsigned char* cursor;
137 // Pointer to start-of-structure-plus-size (for convenience)
138 unsigned char* buf_end;
140 // Start of data in buffer. Contents follow "buffer format" described above.
141 unsigned char buf [1];
144 typedef struct {
145 MonoLinkedListSetNode node;
147 // Was this thread added to the LLS?
148 gboolean attached;
150 // The current log buffer for this thread.
151 LogBuffer *buffer;
153 // Methods referenced by events in `buffer`, see `MethodInfo`.
154 GPtrArray *methods;
156 // Current call depth for enter/leave events.
157 int call_depth;
159 // Indicates whether this thread is currently writing to its `buffer`.
160 gboolean busy;
162 // Has this thread written a thread end event to `buffer`?
163 gboolean ended;
165 // Stored in `buffer_lock_state` to take the exclusive lock.
166 int small_id;
167 } MonoProfilerThread;
169 // Do not use these TLS macros directly unless you know what you're doing.
171 #ifdef HOST_WIN32
173 #define PROF_TLS_SET(VAL) (TlsSetValue (profiler_tls, (VAL)))
174 #define PROF_TLS_GET() ((MonoProfilerThread *) TlsGetValue (profiler_tls))
175 #define PROF_TLS_INIT() (profiler_tls = TlsAlloc ())
176 #define PROF_TLS_FREE() (TlsFree (profiler_tls))
178 static DWORD profiler_tls;
180 #elif HAVE_KW_THREAD
182 #define PROF_TLS_SET(VAL) (profiler_tls = (VAL))
183 #define PROF_TLS_GET() (profiler_tls)
184 #define PROF_TLS_INIT()
185 #define PROF_TLS_FREE()
187 static __thread MonoProfilerThread *profiler_tls;
189 #else
191 #define PROF_TLS_SET(VAL) (pthread_setspecific (profiler_tls, (VAL)))
192 #define PROF_TLS_GET() ((MonoProfilerThread *) pthread_getspecific (profiler_tls))
193 #define PROF_TLS_INIT() (pthread_key_create (&profiler_tls, NULL))
194 #define PROF_TLS_FREE() (pthread_key_delete (profiler_tls))
196 static pthread_key_t profiler_tls;
198 #endif
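/*
 * Illustrative sketch (not compiled; for exposition only): the intended use
 * of the TLS macros above, independent of which mechanism backs them
 * (TlsAlloc, __thread, or pthread keys). The real call sites are
 * init_thread (), get_thread () and deinit_thread () further below.
 */
#if 0
static void
profiler_tls_sketch (void)
{
	PROF_TLS_INIT (); /* once, at profiler startup */

	MonoProfilerThread *thread = PROF_TLS_GET ();

	if (!thread) {
		/* First touch by this thread: allocate and publish its state. */
		thread = g_malloc (sizeof (MonoProfilerThread));
		PROF_TLS_SET (thread);
	}

	PROF_TLS_FREE (); /* once, at profiler shutdown */
}
#endif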
200 static uintptr_t
201 thread_id (void)
203 return (uintptr_t) mono_native_thread_id_get ();
206 static uintptr_t
207 process_id (void)
209 #ifdef HOST_WIN32
210 return (uintptr_t) GetCurrentProcessId ();
211 #else
212 return (uintptr_t) getpid ();
213 #endif
216 #define ENABLED(EVT) (!!(log_config.effective_mask & (EVT)))
217 #define ENABLE(EVT) do { log_config.effective_mask |= (EVT); } while (0)
218 #define DISABLE(EVT) do { log_config.effective_mask &= ~(EVT); } while (0)
220 /*
221 * These macros should be used when writing an event to a log buffer. They
222 * take care of a bunch of stuff that can be repetitive and error-prone, such
223 * as attaching the current thread, acquiring/releasing the buffer lock,
224 * incrementing the event counter, expanding the log buffer, etc. They also
225 * create a scope so that it's harder to leak the LogBuffer pointer, which can
226 * be problematic as the pointer is unstable when the buffer lock isn't
227 * acquired.
229 * If the calling thread is already attached, these macros will not alter its
230 * attach mode (i.e. whether it's added to the LLS). If the thread is not
231 * attached, init_thread () will be called with add_to_lls = TRUE.
232 */
234 #define ENTER_LOG(COUNTER, BUFFER, SIZE) \
235 do { \
236 MonoProfilerThread *thread__ = get_thread (); \
237 if (thread__->attached) \
238 buffer_lock (); \
239 g_assert (!thread__->busy && "Why are we trying to write a new event while already writing one?"); \
240 thread__->busy = TRUE; \
241 InterlockedIncrement ((COUNTER)); \
242 LogBuffer *BUFFER = ensure_logbuf_unsafe (thread__, (SIZE))
244 #define EXIT_LOG_EXPLICIT(SEND) \
245 thread__->busy = FALSE; \
246 if ((SEND)) \
247 send_log_unsafe (TRUE); \
248 if (thread__->attached) \
249 buffer_unlock (); \
250 } while (0)
252 // Pass these to EXIT_LOG_EXPLICIT () for easier reading.
253 #define DO_SEND TRUE
254 #define NO_SEND FALSE
256 #define EXIT_LOG EXIT_LOG_EXPLICIT (DO_SEND)
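/*
 * Illustrative sketch (not compiled; for exposition only): the usage pattern
 * described in the comment above, for a hypothetical event with a one-byte
 * payload. Real uses follow throughout this file, e.g. sync_point_mark ()
 * and gc_resize ().
 */
#if 0
static gint32 example_events_ctr;

static void
example_event (int payload)
{
	ENTER_LOG (&example_events_ctr, logbuffer,
		EVENT_SIZE /* event */ +
		BYTE_SIZE /* payload */
	);

	emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
	emit_byte (logbuffer, payload);

	EXIT_LOG;
}
#endif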
258 typedef struct _BinaryObject BinaryObject;
259 struct _BinaryObject {
260 BinaryObject *next;
261 void *addr;
262 char *name;
265 typedef struct MonoCounterAgent {
266 MonoCounter *counter;
267 // MonoCounterAgent-specific data:
268 void *value;
269 size_t value_size;
270 guint32 index;
271 gboolean emitted;
272 struct MonoCounterAgent *next;
273 } MonoCounterAgent;
275 typedef struct _PerfCounterAgent PerfCounterAgent;
276 struct _PerfCounterAgent {
277 PerfCounterAgent *next;
278 guint32 index;
279 char *category_name;
280 char *name;
281 gint64 value;
282 gboolean emitted;
283 gboolean updated;
284 gboolean deleted;
287 struct _MonoProfiler {
288 MonoProfilerHandle handle;
290 FILE* file;
291 #if defined (HAVE_SYS_ZLIB)
292 gzFile gzfile;
293 #endif
295 char *args;
296 uint64_t startup_time;
297 int timer_overhead;
299 #ifdef __APPLE__
300 mach_timebase_info_data_t timebase_info;
301 #elif defined (HOST_WIN32)
302 LARGE_INTEGER pcounter_freq;
303 #endif
305 int pipe_output;
306 int command_port;
307 int server_socket;
308 int pipes [2];
310 MonoLinkedListSet profiler_thread_list;
311 volatile gint32 buffer_lock_state;
312 volatile gint32 buffer_lock_exclusive_intent;
314 volatile gint32 runtime_inited;
315 volatile gint32 in_shutdown;
317 MonoNativeThreadId helper_thread;
319 MonoNativeThreadId writer_thread;
320 volatile gint32 run_writer_thread;
321 MonoLockFreeQueue writer_queue;
322 MonoSemType writer_queue_sem;
324 MonoLockFreeAllocSizeClass writer_entry_size_class;
325 MonoLockFreeAllocator writer_entry_allocator;
327 MonoConcurrentHashTable *method_table;
328 mono_mutex_t method_table_mutex;
330 MonoNativeThreadId dumper_thread;
331 volatile gint32 run_dumper_thread;
332 MonoLockFreeQueue dumper_queue;
333 MonoSemType dumper_queue_sem;
335 MonoLockFreeAllocSizeClass sample_size_class;
336 MonoLockFreeAllocator sample_allocator;
337 MonoLockFreeQueue sample_reuse_queue;
339 BinaryObject *binary_objects;
341 volatile gint32 heapshot_requested;
342 guint64 gc_count;
343 guint64 last_hs_time;
344 gboolean do_heap_walk;
346 mono_mutex_t counters_mutex;
347 MonoCounterAgent *counters;
348 PerfCounterAgent *perfcounters;
349 guint32 counters_index;
351 mono_mutex_t coverage_mutex;
352 GPtrArray *coverage_data;
354 GPtrArray *coverage_filters;
355 MonoConcurrentHashTable *coverage_filtered_classes;
356 MonoConcurrentHashTable *coverage_suppressed_assemblies;
358 MonoConcurrentHashTable *coverage_methods;
359 MonoConcurrentHashTable *coverage_assemblies;
360 MonoConcurrentHashTable *coverage_classes;
362 MonoConcurrentHashTable *coverage_image_to_methods;
364 guint32 coverage_previous_offset;
365 guint32 coverage_method_id;
367 MonoCoopMutex api_mutex;
370 static ProfilerConfig log_config;
371 static struct _MonoProfiler log_profiler;
373 typedef struct {
374 MonoLockFreeQueueNode node;
375 GPtrArray *methods;
376 LogBuffer *buffer;
377 } WriterQueueEntry;
379 #define WRITER_ENTRY_BLOCK_SIZE (mono_pagesize ())
381 typedef struct {
382 MonoMethod *method;
383 MonoJitInfo *ji;
384 uint64_t time;
385 } MethodInfo;
387 #define TICKS_PER_SEC 1000000000LL
389 static uint64_t
390 current_time (void)
392 #ifdef __APPLE__
393 uint64_t time = mach_absolute_time ();
395 time *= log_profiler.timebase_info.numer;
396 time /= log_profiler.timebase_info.denom;
398 return time;
399 #elif defined (HOST_WIN32)
400 LARGE_INTEGER value;
402 QueryPerformanceCounter (&value);
404 return value.QuadPart * TICKS_PER_SEC / log_profiler.pcounter_freq.QuadPart;
405 #elif defined (CLOCK_MONOTONIC)
406 struct timespec tspec;
408 clock_gettime (CLOCK_MONOTONIC, &tspec);
410 return ((uint64_t) tspec.tv_sec * TICKS_PER_SEC + tspec.tv_nsec);
411 #else
412 struct timeval tv;
414 gettimeofday (&tv, NULL);
416 return ((uint64_t) tv.tv_sec * TICKS_PER_SEC + tv.tv_usec * 1000);
417 #endif
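/*
 * Exposition: all branches above return a monotonic timestamp in nanoseconds
 * (TICKS_PER_SEC ticks per second); emit_time () later encodes these as
 * ULEB128 deltas against each buffer's last_time.
 */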
420 static void
421 init_time (void)
423 #ifdef __APPLE__
424 mach_timebase_info (&log_profiler.timebase_info);
425 #elif defined (HOST_WIN32)
426 QueryPerformanceFrequency (&log_profiler.pcounter_freq);
427 #endif
429 uint64_t time_start = current_time ();
431 for (int i = 0; i < 256; ++i)
432 current_time ();
434 uint64_t time_end = current_time ();
436 log_profiler.timer_overhead = (time_end - time_start) / 256;
439 static char*
440 pstrdup (const char *s)
442 int len = strlen (s) + 1;
443 char *p = (char *) g_malloc (len);
444 memcpy (p, s, len);
445 return p;
448 #define BUFFER_SIZE (4096 * 16)
450 /* Worst-case size in bytes of a 64-bit value encoded with LEB128. */
451 #define LEB128_SIZE 10
453 /* Size of a value encoded as a single byte. */
454 #undef BYTE_SIZE // mach/i386/vm_param.h on OS X defines this to 8, but it isn't used for anything.
455 #define BYTE_SIZE 1
457 /* Size in bytes of the event prefix (ID + time). */
458 #define EVENT_SIZE (BYTE_SIZE + LEB128_SIZE)
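/*
 * Exposition: the SIZE argument passed to ENTER_LOG () throughout this file
 * is built from these worst-case constants. For example, an event carrying a
 * single pointer reserves EVENT_SIZE + LEB128_SIZE = (1 + 10) + 10 bytes,
 * even though the variable-length encodings usually emit far fewer.
 */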
460 static void *
461 alloc_buffer (int size)
463 return mono_valloc (NULL, size, MONO_MMAP_READ | MONO_MMAP_WRITE | MONO_MMAP_ANON | MONO_MMAP_PRIVATE, MONO_MEM_ACCOUNT_PROFILER);
466 static void
467 free_buffer (void *buf, int size)
469 mono_vfree (buf, size, MONO_MEM_ACCOUNT_PROFILER);
472 static LogBuffer*
473 create_buffer (uintptr_t tid, int bytes)
475 LogBuffer* buf = (LogBuffer *) alloc_buffer (MAX (BUFFER_SIZE, bytes));
477 InterlockedIncrement (&buffer_allocations_ctr);
479 buf->size = BUFFER_SIZE;
480 buf->time_base = current_time ();
481 buf->last_time = buf->time_base;
482 buf->buf_end = (unsigned char *) buf + buf->size;
483 buf->cursor = buf->buf;
484 buf->thread_id = tid;
486 return buf;
489 /*
490 * Must be called with the reader lock held if thread is the current thread, or
491 * the exclusive lock if thread is a different thread. However, if thread is
492 * the current thread, and init_thread () was called with add_to_lls = FALSE,
493 * then no locking is necessary.
494 */
495 static void
496 init_buffer_state (MonoProfilerThread *thread)
498 thread->buffer = create_buffer (thread->node.key, 0);
499 thread->methods = NULL;
502 static void
503 clear_hazard_pointers (MonoThreadHazardPointers *hp)
505 mono_hazard_pointer_clear (hp, 0);
506 mono_hazard_pointer_clear (hp, 1);
507 mono_hazard_pointer_clear (hp, 2);
510 static MonoProfilerThread *
511 init_thread (gboolean add_to_lls)
513 MonoProfilerThread *thread = PROF_TLS_GET ();
515 /*
516 * Sometimes we may try to initialize a thread twice. One example is the
517 * main thread: We initialize it when setting up the profiler, but we will
518 * also get a thread_start () callback for it. Another example is when
519 * attaching new threads to the runtime: We may get a gc_alloc () callback
520 * for that thread's thread object (where we initialize it), soon followed
521 * by a thread_start () callback.
523 * These cases are harmless anyhow. Just return if we've already done the
524 * initialization work.
525 */
526 if (thread)
527 return thread;
529 thread = g_malloc (sizeof (MonoProfilerThread));
530 thread->node.key = thread_id ();
531 thread->attached = add_to_lls;
532 thread->call_depth = 0;
533 thread->busy = 0;
534 thread->ended = FALSE;
536 init_buffer_state (thread);
538 thread->small_id = mono_thread_info_register_small_id ();
540 /*
541 * Some internal profiler threads don't need to be cleaned up
542 * by the main thread on shutdown.
543 */
544 if (add_to_lls) {
545 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
546 g_assert (mono_lls_insert (&log_profiler.profiler_thread_list, hp, &thread->node) && "Why can't we insert the thread in the LLS?");
547 clear_hazard_pointers (hp);
550 PROF_TLS_SET (thread);
552 return thread;
555 // Only valid if init_thread () was called with add_to_lls = FALSE.
556 static void
557 deinit_thread (MonoProfilerThread *thread)
559 g_assert (!thread->attached && "Why are we manually freeing an attached thread?");
561 g_free (thread);
562 PROF_TLS_SET (NULL);
565 static MonoProfilerThread *
566 get_thread (void)
568 return init_thread (TRUE);
571 // Only valid if init_thread () was called with add_to_lls = FALSE.
572 static LogBuffer *
573 ensure_logbuf_unsafe (MonoProfilerThread *thread, int bytes)
575 LogBuffer *old = thread->buffer;
577 if (old->cursor + bytes < old->buf_end)
578 return old;
580 LogBuffer *new_ = create_buffer (thread->node.key, bytes);
581 new_->next = old;
582 thread->buffer = new_;
584 return new_;
587 /*
588 * This is a reader/writer spin lock of sorts used to protect log buffers.
589 * When a thread modifies its own log buffer, it increments the reader
590 * count. When a thread wants to access log buffers of other threads, it
591 * takes the exclusive lock.
593 * `buffer_lock_state` holds the reader count in its lower 16 bits, and
594 * the small ID of the thread currently holding the exclusive (writer)
595 * lock in its upper 16 bits. Both can be zero. It's important that the
596 * whole lock state is a single word that can be read/written atomically
597 * to avoid race conditions where there could end up being readers while
598 * the writer lock is held.
600 * The lock is writer-biased. When a thread wants to take the exclusive
601 * lock, it increments `buffer_lock_exclusive_intent` which will make new
602 * readers spin until it's back to zero, then takes the exclusive lock
603 * once the reader count has reached zero. After releasing the exclusive
604 * lock, it decrements `buffer_lock_exclusive_intent`, which, when it
605 * reaches zero again, allows readers to increment the reader count.
607 * The writer bias is necessary because we take the exclusive lock in
608 * `gc_event ()` during STW. If the writer bias was not there, and a
609 * program had a large number of threads, STW-induced pauses could be
610 * significantly longer than they have to be. Also, we emit periodic
611 * sync points from the helper thread, which requires taking the
612 * exclusive lock, and we need those to arrive with a reasonably
613 * consistent frequency so that readers don't have to queue up too many
614 * events between sync points.
616 * The lock does not support recursion.
617 */
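/*
 * Concrete reading of the state word (exposition only): a buffer_lock_state
 * of 0x00000003 means three readers and no exclusive holder; 0x00050000
 * means the thread with small ID 5 holds the exclusive lock and there are
 * no readers. Both halves being nonzero at once would be a broken state,
 * which the assertions in buffer_unlock () and buffer_unlock_excl () check.
 */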
619 static void
620 buffer_lock (void)
622 /*
623 * If the thread holding the exclusive lock tries to modify the
624 * reader count, just make it a no-op. This way, we also avoid
625 * invoking the GC safe point macros below, which could break if
626 * done from a thread that is currently the initiator of STW.
628 * In other words, we rely on the fact that the GC thread takes
629 * the exclusive lock in the gc_event () callback when the world
630 * is about to stop.
631 */
632 if (InterlockedRead (&log_profiler.buffer_lock_state) != get_thread ()->small_id << 16) {
633 MONO_ENTER_GC_SAFE;
635 gint32 old, new_;
637 do {
638 restart:
639 // Hold off if a thread wants to take the exclusive lock.
640 while (InterlockedRead (&log_profiler.buffer_lock_exclusive_intent))
641 mono_thread_info_yield ();
643 old = InterlockedRead (&log_profiler.buffer_lock_state);
645 // Is a thread holding the exclusive lock?
646 if (old >> 16) {
647 mono_thread_info_yield ();
648 goto restart;
651 new_ = old + 1;
652 } while (InterlockedCompareExchange (&log_profiler.buffer_lock_state, new_, old) != old);
654 MONO_EXIT_GC_SAFE;
657 mono_memory_barrier ();
660 static void
661 buffer_unlock (void)
663 mono_memory_barrier ();
665 gint32 state = InterlockedRead (&log_profiler.buffer_lock_state);
667 // See the comment in buffer_lock ().
668 if (state == PROF_TLS_GET ()->small_id << 16)
669 return;
671 g_assert (state && "Why are we decrementing a zero reader count?");
672 g_assert (!(state >> 16) && "Why is the exclusive lock held?");
674 InterlockedDecrement (&log_profiler.buffer_lock_state);
677 static void
678 buffer_lock_excl (void)
680 gint32 new_ = get_thread ()->small_id << 16;
682 g_assert (InterlockedRead (&log_profiler.buffer_lock_state) != new_ && "Why are we taking the exclusive lock twice?");
684 InterlockedIncrement (&log_profiler.buffer_lock_exclusive_intent);
686 MONO_ENTER_GC_SAFE;
688 while (InterlockedCompareExchange (&log_profiler.buffer_lock_state, new_, 0))
689 mono_thread_info_yield ();
691 MONO_EXIT_GC_SAFE;
693 mono_memory_barrier ();
696 static void
697 buffer_unlock_excl (void)
699 mono_memory_barrier ();
701 gint32 state = InterlockedRead (&log_profiler.buffer_lock_state);
702 gint32 excl = state >> 16;
704 g_assert (excl && "Why is the exclusive lock not held?");
705 g_assert (excl == PROF_TLS_GET ()->small_id && "Why does another thread hold the exclusive lock?");
706 g_assert (!(state & 0xFFFF) && "Why are there readers when the exclusive lock is held?");
708 InterlockedWrite (&log_profiler.buffer_lock_state, 0);
709 InterlockedDecrement (&log_profiler.buffer_lock_exclusive_intent);
712 static void
713 encode_uleb128 (uint64_t value, uint8_t *buf, uint8_t **endbuf)
715 uint8_t *p = buf;
717 do {
718 uint8_t b = value & 0x7f;
719 value >>= 7;
721 if (value != 0) /* more bytes to come */
722 b |= 0x80;
724 *p ++ = b;
725 } while (value);
727 *endbuf = p;
730 static void
731 encode_sleb128 (intptr_t value, uint8_t *buf, uint8_t **endbuf)
733 int more = 1;
734 int negative = (value < 0);
735 unsigned int size = sizeof (intptr_t) * 8;
736 uint8_t byte;
737 uint8_t *p = buf;
739 while (more) {
740 byte = value & 0x7f;
741 value >>= 7;
743 /* the following is unnecessary if the
744 * implementation of >>= uses an arithmetic rather
745 * than logical shift for a signed left operand
746 */
747 if (negative)
748 /* sign extend */
749 value |= - ((intptr_t) 1 <<(size - 7));
751 /* sign bit of byte is second high order bit (0x40) */
752 if ((value == 0 && !(byte & 0x40)) ||
753 (value == -1 && (byte & 0x40)))
754 more = 0;
755 else
756 byte |= 0x80;
758 *p ++= byte;
761 *endbuf = p;
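/*
 * Worked example (standard LEB128, for exposition only): encode_uleb128 ()
 * on 624485 (0x98765) emits 0xE5 0x8E 0x26 -- the 7-bit groups 0x65, 0x0E,
 * 0x26 from least to most significant, with the continuation bit (0x80) set
 * on all but the last byte. encode_sleb128 () on -123456 emits 0xC0 0xBB 0x78.
 */
#if 0
static void
leb128_example (void)
{
	uint8_t buf [LEB128_SIZE], *end;

	encode_uleb128 (624485, buf, &end);
	g_assert (end - buf == 3 && buf [0] == 0xE5 && buf [1] == 0x8E && buf [2] == 0x26);

	encode_sleb128 (-123456, buf, &end);
	g_assert (end - buf == 3 && buf [0] == 0xC0 && buf [1] == 0xBB && buf [2] == 0x78);
}
#endif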
764 static void
765 emit_byte (LogBuffer *logbuffer, int value)
767 logbuffer->cursor [0] = value;
768 logbuffer->cursor++;
770 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
773 static void
774 emit_value (LogBuffer *logbuffer, int value)
776 encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
778 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
781 static void
782 emit_time (LogBuffer *logbuffer, uint64_t value)
784 uint64_t tdiff = value - logbuffer->last_time;
785 encode_uleb128 (tdiff, logbuffer->cursor, &logbuffer->cursor);
786 logbuffer->last_time = value;
788 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
791 static void
792 emit_event_time (LogBuffer *logbuffer, int event, uint64_t time)
794 emit_byte (logbuffer, event);
795 emit_time (logbuffer, time);
798 static void
799 emit_event (LogBuffer *logbuffer, int event)
801 emit_event_time (logbuffer, event, current_time ());
804 static void
805 emit_svalue (LogBuffer *logbuffer, int64_t value)
807 encode_sleb128 (value, logbuffer->cursor, &logbuffer->cursor);
809 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
812 static void
813 emit_uvalue (LogBuffer *logbuffer, uint64_t value)
815 encode_uleb128 (value, logbuffer->cursor, &logbuffer->cursor);
817 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
820 static void
821 emit_ptr (LogBuffer *logbuffer, const void *ptr)
823 if (!logbuffer->ptr_base)
824 logbuffer->ptr_base = (uintptr_t) ptr;
826 emit_svalue (logbuffer, (intptr_t) ptr - logbuffer->ptr_base);
828 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
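/*
 * Exposition: the first pointer emitted into a fresh buffer establishes
 * ptr_base and is encoded as the SLEB128 of 0; subsequent pointers are
 * encoded as deltas from that base. The reader recovers absolute addresses
 * from the ptr_base field that dump_buffer () writes into the buffer header.
 */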
831 static void
832 emit_method_inner (LogBuffer *logbuffer, void *method)
834 if (!logbuffer->method_base) {
835 logbuffer->method_base = (intptr_t) method;
836 logbuffer->last_method = (intptr_t) method;
839 encode_sleb128 ((intptr_t) ((char *) method - (char *) logbuffer->last_method), logbuffer->cursor, &logbuffer->cursor);
840 logbuffer->last_method = (intptr_t) method;
842 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
845 // The reader lock must be held.
846 static void
847 register_method_local (MonoMethod *method, MonoJitInfo *ji)
849 MonoProfilerThread *thread = get_thread ();
851 if (!mono_conc_hashtable_lookup (log_profiler.method_table, method)) {
852 MethodInfo *info = (MethodInfo *) g_malloc (sizeof (MethodInfo));
854 info->method = method;
855 info->ji = ji;
856 info->time = current_time ();
858 GPtrArray *arr = thread->methods ? thread->methods : (thread->methods = g_ptr_array_new ());
859 g_ptr_array_add (arr, info);
863 static void
864 emit_method (LogBuffer *logbuffer, MonoMethod *method)
866 register_method_local (method, NULL);
867 emit_method_inner (logbuffer, method);
870 static void
871 emit_obj (LogBuffer *logbuffer, void *ptr)
873 if (!logbuffer->obj_base)
874 logbuffer->obj_base = (uintptr_t) ptr >> 3;
876 emit_svalue (logbuffer, ((uintptr_t) ptr >> 3) - logbuffer->obj_base);
878 g_assert (logbuffer->cursor <= logbuffer->buf_end && "Why are we writing past the buffer end?");
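/*
 * Objects are at least 8-byte aligned, so both the base and the delta are
 * shifted right by 3 bits here; dropping the always-zero low bits keeps the
 * SLEB128 encoding of object references short.
 */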
881 static void
882 emit_string (LogBuffer *logbuffer, const char *str, size_t size)
884 size_t i = 0;
885 if (str) {
886 for (; i < size; i++) {
887 if (str[i] == '\0')
888 break;
889 emit_byte (logbuffer, str [i]);
892 emit_byte (logbuffer, '\0');
895 static void
896 emit_double (LogBuffer *logbuffer, double value)
898 int i;
899 unsigned char buffer[8];
900 memcpy (buffer, &value, 8);
901 #if G_BYTE_ORDER == G_BIG_ENDIAN
902 for (i = 7; i >= 0; i--)
903 #else
904 for (i = 0; i < 8; i++)
905 #endif
906 emit_byte (logbuffer, buffer[i]);
909 static char*
910 write_int16 (char *buf, int32_t value)
912 int i;
913 for (i = 0; i < 2; ++i) {
914 buf [i] = value;
915 value >>= 8;
917 return buf + 2;
920 static char*
921 write_int32 (char *buf, int32_t value)
923 int i;
924 for (i = 0; i < 4; ++i) {
925 buf [i] = value;
926 value >>= 8;
928 return buf + 4;
931 static char*
932 write_int64 (char *buf, int64_t value)
934 int i;
935 for (i = 0; i < 8; ++i) {
936 buf [i] = value;
937 value >>= 8;
939 return buf + 8;
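/*
 * These write_int* helpers store fixed-width values in little-endian byte
 * order regardless of host endianness; e.g. write_int32 (buf, 0x12345678)
 * stores the bytes 0x78 0x56 0x34 0x12 (exposition only).
 */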
942 static char *
943 write_header_string (char *p, const char *str)
945 size_t len = strlen (str) + 1;
947 p = write_int32 (p, len);
948 strcpy (p, str);
950 return p + len;
953 static void
954 dump_header (void)
956 const char *args = log_profiler.args;
957 const char *arch = mono_config_get_cpu ();
958 const char *os = mono_config_get_os ();
960 char *hbuf = g_malloc (
961 sizeof (gint32) /* header id */ +
962 sizeof (gint8) /* major version */ +
963 sizeof (gint8) /* minor version */ +
964 sizeof (gint8) /* data version */ +
965 sizeof (gint8) /* word size */ +
966 sizeof (gint64) /* startup time */ +
967 sizeof (gint32) /* timer overhead */ +
968 sizeof (gint32) /* flags */ +
969 sizeof (gint32) /* process id */ +
970 sizeof (gint16) /* command port */ +
971 sizeof (gint32) + strlen (args) + 1 /* arguments */ +
972 sizeof (gint32) + strlen (arch) + 1 /* architecture */ +
973 sizeof (gint32) + strlen (os) + 1 /* operating system */
975 char *p = hbuf;
977 p = write_int32 (p, LOG_HEADER_ID);
978 *p++ = LOG_VERSION_MAJOR;
979 *p++ = LOG_VERSION_MINOR;
980 *p++ = LOG_DATA_VERSION;
981 *p++ = sizeof (void *);
982 p = write_int64 (p, ((uint64_t) time (NULL)) * 1000);
983 p = write_int32 (p, log_profiler.timer_overhead);
984 p = write_int32 (p, 0); /* flags */
985 p = write_int32 (p, process_id ());
986 p = write_int16 (p, log_profiler.command_port);
987 p = write_header_string (p, args);
988 p = write_header_string (p, arch);
989 p = write_header_string (p, os);
991 #if defined (HAVE_SYS_ZLIB)
992 if (log_profiler.gzfile) {
993 gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
994 } else
995 #endif
997 fwrite (hbuf, p - hbuf, 1, log_profiler.file);
998 fflush (log_profiler.file);
1001 g_free (hbuf);
1004 /*
1005 * Must be called with the reader lock held if thread is the current thread, or
1006 * the exclusive lock if thread is a different thread. However, if thread is
1007 * the current thread, and init_thread () was called with add_to_lls = FALSE,
1008 * then no locking is necessary.
1009 */
1010 static void
1011 send_buffer (MonoProfilerThread *thread)
1013 WriterQueueEntry *entry = mono_lock_free_alloc (&log_profiler.writer_entry_allocator);
1014 entry->methods = thread->methods;
1015 entry->buffer = thread->buffer;
1017 mono_lock_free_queue_node_init (&entry->node, FALSE);
1019 mono_lock_free_queue_enqueue (&log_profiler.writer_queue, &entry->node);
1020 mono_os_sem_post (&log_profiler.writer_queue_sem);
1023 static void
1024 free_thread (gpointer p)
1026 MonoProfilerThread *thread = p;
1028 if (!thread->ended) {
1029 /*
1030 * The thread is being cleaned up by the main thread during
1031 * shutdown. This typically happens for internal runtime
1032 * threads. We need to synthesize a thread end event.
1033 */
1035 InterlockedIncrement (&thread_ends_ctr);
1037 LogBuffer *buf = ensure_logbuf_unsafe (thread,
1038 EVENT_SIZE /* event */ +
1039 BYTE_SIZE /* type */ +
1040 LEB128_SIZE /* tid */
1043 emit_event (buf, TYPE_END_UNLOAD | TYPE_METADATA);
1044 emit_byte (buf, TYPE_THREAD);
1045 emit_ptr (buf, (void *) thread->node.key);
1048 send_buffer (thread);
1050 g_free (thread);
1053 static void
1054 remove_thread (MonoProfilerThread *thread)
1056 MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
1058 if (mono_lls_remove (&log_profiler.profiler_thread_list, hp, &thread->node))
1059 mono_thread_hazardous_try_free (thread, free_thread);
1061 clear_hazard_pointers (hp);
1064 static void
1065 dump_buffer (LogBuffer *buf)
1067 char hbuf [128];
1068 char *p = hbuf;
1070 if (buf->next)
1071 dump_buffer (buf->next);
1073 if (buf->cursor - buf->buf) {
1074 p = write_int32 (p, BUF_ID);
1075 p = write_int32 (p, buf->cursor - buf->buf);
1076 p = write_int64 (p, buf->time_base);
1077 p = write_int64 (p, buf->ptr_base);
1078 p = write_int64 (p, buf->obj_base);
1079 p = write_int64 (p, buf->thread_id);
1080 p = write_int64 (p, buf->method_base);
1082 #if defined (HAVE_SYS_ZLIB)
1083 if (log_profiler.gzfile) {
1084 gzwrite (log_profiler.gzfile, hbuf, p - hbuf);
1085 gzwrite (log_profiler.gzfile, buf->buf, buf->cursor - buf->buf);
1086 } else
1087 #endif
1089 fwrite (hbuf, p - hbuf, 1, log_profiler.file);
1090 fwrite (buf->buf, buf->cursor - buf->buf, 1, log_profiler.file);
1091 fflush (log_profiler.file);
1095 free_buffer (buf, buf->size);
1098 static void
1099 dump_buffer_threadless (LogBuffer *buf)
1101 for (LogBuffer *iter = buf; iter; iter = iter->next)
1102 iter->thread_id = 0;
1104 dump_buffer (buf);
1107 // Only valid if init_thread () was called with add_to_lls = FALSE.
1108 static void
1109 send_log_unsafe (gboolean if_needed)
1111 MonoProfilerThread *thread = PROF_TLS_GET ();
1113 if (!if_needed || (if_needed && thread->buffer->next)) {
1114 if (!thread->attached)
1115 for (LogBuffer *iter = thread->buffer; iter; iter = iter->next)
1116 iter->thread_id = 0;
1118 send_buffer (thread);
1119 init_buffer_state (thread);
1123 // Assumes that the exclusive lock is held.
1124 static void
1125 sync_point_flush (void)
1127 g_assert (InterlockedRead (&log_profiler.buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1129 MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
1130 g_assert (thread->attached && "Why is a thread in the LLS not attached?");
1132 send_buffer (thread);
1133 init_buffer_state (thread);
1134 } MONO_LLS_FOREACH_SAFE_END
1137 // Assumes that the exclusive lock is held.
1138 static void
1139 sync_point_mark (MonoProfilerSyncPointType type)
1141 g_assert (InterlockedRead (&log_profiler.buffer_lock_state) == PROF_TLS_GET ()->small_id << 16 && "Why don't we hold the exclusive lock?");
1143 ENTER_LOG (&sync_points_ctr, logbuffer,
1144 EVENT_SIZE /* event */ +
1145 LEB128_SIZE /* type */
1148 emit_event (logbuffer, TYPE_META | TYPE_SYNC_POINT);
1149 emit_byte (logbuffer, type);
1151 EXIT_LOG_EXPLICIT (NO_SEND);
1153 send_log_unsafe (FALSE);
1156 // Assumes that the exclusive lock is held.
1157 static void
1158 sync_point (MonoProfilerSyncPointType type)
1160 sync_point_flush ();
1161 sync_point_mark (type);
1164 static int
1165 gc_reference (MonoObject *obj, MonoClass *klass, uintptr_t size, uintptr_t num, MonoObject **refs, uintptr_t *offsets, void *data)
1167 /* account for object alignment in the heap */
1168 size += 7;
1169 size &= ~7;
1171 ENTER_LOG (&heap_objects_ctr, logbuffer,
1172 EVENT_SIZE /* event */ +
1173 LEB128_SIZE /* obj */ +
1174 LEB128_SIZE /* klass */ +
1175 LEB128_SIZE /* size */ +
1176 LEB128_SIZE /* num */ +
1177 num * (
1178 LEB128_SIZE /* offset */ +
1179 LEB128_SIZE /* ref */
1183 emit_event (logbuffer, TYPE_HEAP_OBJECT | TYPE_HEAP);
1184 emit_obj (logbuffer, obj);
1185 emit_ptr (logbuffer, klass);
1186 emit_value (logbuffer, size);
1187 emit_value (logbuffer, num);
1189 uintptr_t last_offset = 0;
1191 for (int i = 0; i < num; ++i) {
1192 emit_value (logbuffer, offsets [i] - last_offset);
1193 last_offset = offsets [i];
1194 emit_obj (logbuffer, refs [i]);
1197 EXIT_LOG;
1199 return 0;
1202 static void
1203 gc_roots (MonoProfiler *prof, MonoObject *const *objects, const MonoProfilerGCRootType *root_types, const uintptr_t *extra_info, uint64_t num)
1205 ENTER_LOG (&heap_roots_ctr, logbuffer,
1206 EVENT_SIZE /* event */ +
1207 LEB128_SIZE /* num */ +
1208 LEB128_SIZE /* collections */ +
1209 num * (
1210 LEB128_SIZE /* object */ +
1211 LEB128_SIZE /* root type */ +
1212 LEB128_SIZE /* extra info */
1216 emit_event (logbuffer, TYPE_HEAP_ROOT | TYPE_HEAP);
1217 emit_value (logbuffer, num);
1218 emit_value (logbuffer, mono_gc_collection_count (mono_gc_max_generation ()));
1220 for (int i = 0; i < num; ++i) {
1221 emit_obj (logbuffer, objects [i]);
1222 emit_byte (logbuffer, root_types [i]);
1223 emit_value (logbuffer, extra_info [i]);
1226 EXIT_LOG;
1230 static void
1231 trigger_on_demand_heapshot (void)
1233 if (InterlockedRead (&log_profiler.heapshot_requested))
1234 mono_gc_collect (mono_gc_max_generation ());
1237 #define ALL_GC_EVENTS_MASK (PROFLOG_GC_EVENTS | PROFLOG_GC_MOVE_EVENTS | PROFLOG_GC_ROOT_EVENTS)
1239 static void
1240 gc_event (MonoProfiler *profiler, MonoProfilerGCEvent ev, uint32_t generation)
1242 if (ENABLED (PROFLOG_GC_EVENTS)) {
1243 ENTER_LOG (&gc_events_ctr, logbuffer,
1244 EVENT_SIZE /* event */ +
1245 BYTE_SIZE /* gc event */ +
1246 BYTE_SIZE /* generation */
1249 emit_event (logbuffer, TYPE_GC_EVENT | TYPE_GC);
1250 emit_byte (logbuffer, ev);
1251 emit_byte (logbuffer, generation);
1253 EXIT_LOG;
1256 switch (ev) {
1257 case MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED:
1258 switch (log_config.hs_mode) {
1259 case MONO_PROFILER_HEAPSHOT_NONE:
1260 log_profiler.do_heap_walk = FALSE;
1261 break;
1262 case MONO_PROFILER_HEAPSHOT_MAJOR:
1263 log_profiler.do_heap_walk = generation == mono_gc_max_generation ();
1264 break;
1265 case MONO_PROFILER_HEAPSHOT_ON_DEMAND:
1266 log_profiler.do_heap_walk = InterlockedRead (&log_profiler.heapshot_requested);
1267 break;
1268 case MONO_PROFILER_HEAPSHOT_X_GC:
1269 log_profiler.do_heap_walk = !(log_profiler.gc_count % log_config.hs_freq_gc);
1270 break;
1271 case MONO_PROFILER_HEAPSHOT_X_MS:
1272 log_profiler.do_heap_walk = (current_time () - log_profiler.last_hs_time) / 1000 / 1000 >= log_config.hs_freq_ms; /* elapsed ns -> ms */
1273 break;
1274 default:
1275 g_assert_not_reached ();
1278 /*
1279 * heapshot_requested is set either because on-demand heapshot is
1280 * enabled and a heapshot was triggered, or because we're doing a
1281 * shutdown heapshot. In the latter case, we won't check it in the
1282 * switch above, so check it here and override any decision we made
1283 * above.
1284 */
1285 if (InterlockedRead (&log_profiler.heapshot_requested))
1286 log_profiler.do_heap_walk = TRUE;
1288 if (ENABLED (PROFLOG_GC_ROOT_EVENTS) && log_profiler.do_heap_walk)
1289 mono_profiler_set_gc_roots_callback (log_profiler.handle, gc_roots);
1291 /*
1292 * Ensure that no thread can be in the middle of writing to
1293 * a buffer when the world stops...
1294 */
1295 buffer_lock_excl ();
1297 break;
1298 case MONO_GC_EVENT_POST_STOP_WORLD:
1299 /*
1300 * ... So that we now have a consistent view of all buffers.
1301 * This allows us to flush them. We need to do this because
1302 * they may contain object allocation events that need to be
1303 * committed to the log file before any object move events
1304 * that will be produced during this GC.
1305 */
1306 if (ENABLED (ALL_GC_EVENTS_MASK))
1307 sync_point (SYNC_POINT_WORLD_STOP);
1309 // Surround heapshots with HEAP_START/HEAP_END events.
1310 if (log_profiler.do_heap_walk) {
1311 ENTER_LOG (&heap_starts_ctr, logbuffer,
1312 EVENT_SIZE /* event */
1315 emit_event (logbuffer, TYPE_HEAP_START | TYPE_HEAP);
1317 EXIT_LOG;
1320 break;
1321 case MONO_GC_EVENT_START:
1322 if (generation == mono_gc_max_generation ())
1323 log_profiler.gc_count++;
1325 break;
1326 case MONO_GC_EVENT_PRE_START_WORLD:
1327 mono_profiler_set_gc_roots_callback (log_profiler.handle, NULL);
1329 if (log_profiler.do_heap_walk) {
1330 mono_gc_walk_heap (0, gc_reference, NULL);
1332 ENTER_LOG (&heap_ends_ctr, logbuffer,
1333 EVENT_SIZE /* event */
1336 emit_event (logbuffer, TYPE_HEAP_END | TYPE_HEAP);
1338 EXIT_LOG;
1340 log_profiler.do_heap_walk = FALSE;
1341 log_profiler.last_hs_time = current_time ();
1343 InterlockedWrite (&log_profiler.heapshot_requested, 0);
1346 /*
1347 * Similarly, we must now make sure that any object moves
1348 * written to the GC thread's buffer are flushed. Otherwise,
1349 * object allocation events for certain addresses could come
1350 * after the move events that made those addresses available.
1351 */
1352 if (ENABLED (ALL_GC_EVENTS_MASK))
1353 sync_point_mark (SYNC_POINT_WORLD_START);
1355 break;
1356 case MONO_GC_EVENT_POST_START_WORLD_UNLOCKED:
1357 /*
1358 * Finally, it is safe to allow other threads to write to
1359 * their buffers again.
1360 */
1361 buffer_unlock_excl ();
1363 break;
1364 default:
1365 break;
1369 static void
1370 gc_resize (MonoProfiler *profiler, uintptr_t new_size)
1372 ENTER_LOG (&gc_resizes_ctr, logbuffer,
1373 EVENT_SIZE /* event */ +
1374 LEB128_SIZE /* new size */
1377 emit_event (logbuffer, TYPE_GC_RESIZE | TYPE_GC);
1378 emit_value (logbuffer, new_size);
1380 EXIT_LOG;
1383 typedef struct {
1384 int count;
1385 MonoMethod* methods [MAX_FRAMES];
1386 int32_t il_offsets [MAX_FRAMES];
1387 int32_t native_offsets [MAX_FRAMES];
1388 } FrameData;
1390 static mono_bool
1391 walk_stack (MonoMethod *method, int32_t native_offset, int32_t il_offset, mono_bool managed, void* data)
1393 FrameData *frame = (FrameData *)data;
1394 if (method && frame->count < log_config.num_frames) {
1395 frame->il_offsets [frame->count] = il_offset;
1396 frame->native_offsets [frame->count] = native_offset;
1397 frame->methods [frame->count++] = method;
1399 return frame->count == log_config.num_frames;
1402 /*
1403 * a note about stack walks: they can cause more profiler events to fire,
1404 * so we need to make sure they don't happen after we started emitting an
1405 * event, hence the collect_bt/emit_bt split.
1406 */
1407 static void
1408 collect_bt (FrameData *data)
1410 data->count = 0;
1411 mono_stack_walk_no_il (walk_stack, data);
1414 static void
1415 emit_bt (LogBuffer *logbuffer, FrameData *data)
1417 emit_value (logbuffer, data->count);
1419 while (data->count)
1420 emit_method (logbuffer, data->methods [--data->count]);
1423 static void
1424 gc_alloc (MonoProfiler *prof, MonoObject *obj)
1426 int do_bt = (!log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_ALLOC_BT : 0;
1427 FrameData data;
1428 uintptr_t len = mono_object_get_size (obj);
1429 /* account for object alignment in the heap */
1430 len += 7;
1431 len &= ~7;
1433 if (do_bt)
1434 collect_bt (&data);
1436 ENTER_LOG (&gc_allocs_ctr, logbuffer,
1437 EVENT_SIZE /* event */ +
1438 LEB128_SIZE /* klass */ +
1439 LEB128_SIZE /* obj */ +
1440 LEB128_SIZE /* size */ +
1441 (do_bt ? (
1442 LEB128_SIZE /* count */ +
1443 data.count * (
1444 LEB128_SIZE /* method */
1446 ) : 0)
1449 emit_event (logbuffer, do_bt | TYPE_ALLOC);
1450 emit_ptr (logbuffer, mono_object_get_class (obj));
1451 emit_obj (logbuffer, obj);
1452 emit_value (logbuffer, len);
1454 if (do_bt)
1455 emit_bt (logbuffer, &data);
1457 EXIT_LOG;
1460 static void
1461 gc_moves (MonoProfiler *prof, MonoObject *const *objects, uint64_t num)
1463 ENTER_LOG (&gc_moves_ctr, logbuffer,
1464 EVENT_SIZE /* event */ +
1465 LEB128_SIZE /* num */ +
1466 num * (
1467 LEB128_SIZE /* object */
1471 emit_event (logbuffer, TYPE_GC_MOVE | TYPE_GC);
1472 emit_value (logbuffer, num);
1474 for (int i = 0; i < num; ++i)
1475 emit_obj (logbuffer, objects [i]);
1477 EXIT_LOG;
1480 static void
1481 gc_handle (MonoProfiler *prof, int op, MonoGCHandleType type, uint32_t handle, MonoObject *obj)
1483 int do_bt = !log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames;
1484 FrameData data;
1486 if (do_bt)
1487 collect_bt (&data);
1489 gint32 *ctr = op == MONO_PROFILER_GC_HANDLE_CREATED ? &gc_handle_creations_ctr : &gc_handle_deletions_ctr;
1491 ENTER_LOG (ctr, logbuffer,
1492 EVENT_SIZE /* event */ +
1493 LEB128_SIZE /* type */ +
1494 LEB128_SIZE /* handle */ +
1495 (op == MONO_PROFILER_GC_HANDLE_CREATED ? (
1496 LEB128_SIZE /* obj */
1497 ) : 0) +
1498 (do_bt ? (
1499 LEB128_SIZE /* count */ +
1500 data.count * (
1501 LEB128_SIZE /* method */
1503 ) : 0)
1506 if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1507 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_CREATED_BT : TYPE_GC_HANDLE_CREATED) | TYPE_GC);
1508 else if (op == MONO_PROFILER_GC_HANDLE_DESTROYED)
1509 emit_event (logbuffer, (do_bt ? TYPE_GC_HANDLE_DESTROYED_BT : TYPE_GC_HANDLE_DESTROYED) | TYPE_GC);
1510 else
1511 g_assert_not_reached ();
1513 emit_value (logbuffer, type);
1514 emit_value (logbuffer, handle);
1516 if (op == MONO_PROFILER_GC_HANDLE_CREATED)
1517 emit_obj (logbuffer, obj);
1519 if (do_bt)
1520 emit_bt (logbuffer, &data);
1522 EXIT_LOG;
1525 static void
1526 gc_handle_created (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type, MonoObject *obj)
1528 gc_handle (prof, MONO_PROFILER_GC_HANDLE_CREATED, type, handle, obj);
1531 static void
1532 gc_handle_deleted (MonoProfiler *prof, uint32_t handle, MonoGCHandleType type)
1534 gc_handle (prof, MONO_PROFILER_GC_HANDLE_DESTROYED, type, handle, NULL);
1537 static void
1538 finalize_begin (MonoProfiler *prof)
1540 ENTER_LOG (&finalize_begins_ctr, buf,
1541 EVENT_SIZE /* event */
1544 emit_event (buf, TYPE_GC_FINALIZE_START | TYPE_GC);
1546 EXIT_LOG;
1549 static void
1550 finalize_end (MonoProfiler *prof)
1552 trigger_on_demand_heapshot ();
1553 if (ENABLED (PROFLOG_GC_FINALIZATION_EVENTS)) {
1554 ENTER_LOG (&finalize_ends_ctr, buf,
1555 EVENT_SIZE /* event */
1558 emit_event (buf, TYPE_GC_FINALIZE_END | TYPE_GC);
1560 EXIT_LOG;
1564 static void
1565 finalize_object_begin (MonoProfiler *prof, MonoObject *obj)
1567 ENTER_LOG (&finalize_object_begins_ctr, buf,
1568 EVENT_SIZE /* event */ +
1569 LEB128_SIZE /* obj */
1572 emit_event (buf, TYPE_GC_FINALIZE_OBJECT_START | TYPE_GC);
1573 emit_obj (buf, obj);
1575 EXIT_LOG;
1578 static void
1579 finalize_object_end (MonoProfiler *prof, MonoObject *obj)
1581 ENTER_LOG (&finalize_object_ends_ctr, buf,
1582 EVENT_SIZE /* event */ +
1583 LEB128_SIZE /* obj */
1586 emit_event (buf, TYPE_GC_FINALIZE_OBJECT_END | TYPE_GC);
1587 emit_obj (buf, obj);
1589 EXIT_LOG;
1592 static char*
1593 push_nesting (char *p, MonoClass *klass)
1595 MonoClass *nesting;
1596 const char *name;
1597 const char *nspace;
1598 nesting = mono_class_get_nesting_type (klass);
1599 if (nesting) {
1600 p = push_nesting (p, nesting);
1601 *p++ = '/';
1602 *p = 0;
1604 name = mono_class_get_name (klass);
1605 nspace = mono_class_get_namespace (klass);
1606 if (*nspace) {
1607 strcpy (p, nspace);
1608 p += strlen (nspace);
1609 *p++ = '.';
1610 *p = 0;
1612 strcpy (p, name);
1613 p += strlen (name);
1614 return p;
1617 static char*
1618 type_name (MonoClass *klass)
1620 char buf [1024];
1621 char *p;
1622 push_nesting (buf, klass);
1623 p = (char *) g_malloc (strlen (buf) + 1);
1624 strcpy (p, buf);
1625 return p;
1628 static void
1629 image_loaded (MonoProfiler *prof, MonoImage *image)
1631 const char *name = mono_image_get_filename (image);
1632 int nlen = strlen (name) + 1;
1634 ENTER_LOG (&image_loads_ctr, logbuffer,
1635 EVENT_SIZE /* event */ +
1636 BYTE_SIZE /* type */ +
1637 LEB128_SIZE /* image */ +
1638 nlen /* name */
1641 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1642 emit_byte (logbuffer, TYPE_IMAGE);
1643 emit_ptr (logbuffer, image);
1644 memcpy (logbuffer->cursor, name, nlen);
1645 logbuffer->cursor += nlen;
1647 EXIT_LOG;
1650 static void
1651 image_unloaded (MonoProfiler *prof, MonoImage *image)
1653 const char *name = mono_image_get_filename (image);
1654 int nlen = strlen (name) + 1;
1656 ENTER_LOG (&image_unloads_ctr, logbuffer,
1657 EVENT_SIZE /* event */ +
1658 BYTE_SIZE /* type */ +
1659 LEB128_SIZE /* image */ +
1660 nlen /* name */
1663 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1664 emit_byte (logbuffer, TYPE_IMAGE);
1665 emit_ptr (logbuffer, image);
1666 memcpy (logbuffer->cursor, name, nlen);
1667 logbuffer->cursor += nlen;
1669 EXIT_LOG;
1672 static void
1673 assembly_loaded (MonoProfiler *prof, MonoAssembly *assembly)
1675 char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1676 int nlen = strlen (name) + 1;
1677 MonoImage *image = mono_assembly_get_image (assembly);
1679 ENTER_LOG (&assembly_loads_ctr, logbuffer,
1680 EVENT_SIZE /* event */ +
1681 BYTE_SIZE /* type */ +
1682 LEB128_SIZE /* assembly */ +
1683 LEB128_SIZE /* image */ +
1684 nlen /* name */
1687 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1688 emit_byte (logbuffer, TYPE_ASSEMBLY);
1689 emit_ptr (logbuffer, assembly);
1690 emit_ptr (logbuffer, image);
1691 memcpy (logbuffer->cursor, name, nlen);
1692 logbuffer->cursor += nlen;
1694 EXIT_LOG;
1696 mono_free (name);
1699 static void
1700 assembly_unloaded (MonoProfiler *prof, MonoAssembly *assembly)
1702 char *name = mono_stringify_assembly_name (mono_assembly_get_name (assembly));
1703 int nlen = strlen (name) + 1;
1704 MonoImage *image = mono_assembly_get_image (assembly);
1706 ENTER_LOG (&assembly_unloads_ctr, logbuffer,
1707 EVENT_SIZE /* event */ +
1708 BYTE_SIZE /* type */ +
1709 LEB128_SIZE /* assembly */ +
1710 LEB128_SIZE /* image */ +
1711 nlen /* name */
1714 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1715 emit_byte (logbuffer, TYPE_ASSEMBLY);
1716 emit_ptr (logbuffer, assembly);
1717 emit_ptr (logbuffer, image);
1718 memcpy (logbuffer->cursor, name, nlen);
1719 logbuffer->cursor += nlen;
1721 EXIT_LOG;
1723 mono_free (name);
1726 static void
1727 class_loaded (MonoProfiler *prof, MonoClass *klass)
1729 char *name;
1731 if (InterlockedRead (&log_profiler.runtime_inited))
1732 name = mono_type_get_name (mono_class_get_type (klass));
1733 else
1734 name = type_name (klass);
1736 int nlen = strlen (name) + 1;
1737 MonoImage *image = mono_class_get_image (klass);
1739 ENTER_LOG (&class_loads_ctr, logbuffer,
1740 EVENT_SIZE /* event */ +
1741 BYTE_SIZE /* type */ +
1742 LEB128_SIZE /* klass */ +
1743 LEB128_SIZE /* image */ +
1744 nlen /* name */
1747 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1748 emit_byte (logbuffer, TYPE_CLASS);
1749 emit_ptr (logbuffer, klass);
1750 emit_ptr (logbuffer, image);
1751 memcpy (logbuffer->cursor, name, nlen);
1752 logbuffer->cursor += nlen;
1754 EXIT_LOG;
1756 if (InterlockedRead (&log_profiler.runtime_inited))
1757 mono_free (name);
1758 else
1759 g_free (name);
1762 static void
1763 method_enter (MonoProfiler *prof, MonoMethod *method, MonoProfilerCallContext *ctx)
1765 if (get_thread ()->call_depth++ <= log_config.max_call_depth) {
1766 ENTER_LOG (&method_entries_ctr, logbuffer,
1767 EVENT_SIZE /* event */ +
1768 LEB128_SIZE /* method */
1771 emit_event (logbuffer, TYPE_ENTER | TYPE_METHOD);
1772 emit_method (logbuffer, method);
1774 EXIT_LOG;
1778 static void
1779 method_leave (MonoProfiler *prof, MonoMethod *method, MonoProfilerCallContext *ctx)
1781 if (--get_thread ()->call_depth <= log_config.max_call_depth) {
1782 ENTER_LOG (&method_exits_ctr, logbuffer,
1783 EVENT_SIZE /* event */ +
1784 LEB128_SIZE /* method */
1787 emit_event (logbuffer, TYPE_LEAVE | TYPE_METHOD);
1788 emit_method (logbuffer, method);
1790 EXIT_LOG;
1794 static void
1795 method_exc_leave (MonoProfiler *prof, MonoMethod *method, MonoObject *exc)
1797 if (--get_thread ()->call_depth <= log_config.max_call_depth) {
1798 ENTER_LOG (&method_exception_exits_ctr, logbuffer,
1799 EVENT_SIZE /* event */ +
1800 LEB128_SIZE /* method */
1803 emit_event (logbuffer, TYPE_EXC_LEAVE | TYPE_METHOD);
1804 emit_method (logbuffer, method);
1806 EXIT_LOG;
1810 static MonoProfilerCallInstrumentationFlags
1811 method_filter (MonoProfiler *prof, MonoMethod *method)
1813 return MONO_PROFILER_CALL_INSTRUMENTATION_PROLOGUE | MONO_PROFILER_CALL_INSTRUMENTATION_EPILOGUE;
1816 static void
1817 method_jitted (MonoProfiler *prof, MonoMethod *method, MonoJitInfo *ji)
1819 buffer_lock ();
1821 register_method_local (method, ji);
1823 buffer_unlock ();
1826 static void
1827 code_buffer_new (MonoProfiler *prof, const mono_byte *buffer, uint64_t size, MonoProfilerCodeBufferType type, const void *data)
1829 const char *name;
1830 int nlen;
1832 if (type == MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE) {
1833 name = (const char *) data;
1834 nlen = strlen (name) + 1;
1835 } else {
1836 name = NULL;
1837 nlen = 0;
1840 ENTER_LOG (&code_buffers_ctr, logbuffer,
1841 EVENT_SIZE /* event */ +
1842 BYTE_SIZE /* type */ +
1843 LEB128_SIZE /* buffer */ +
1844 LEB128_SIZE /* size */ +
1845 (name ? (
1846 nlen /* name */
1847 ) : 0)
1850 emit_event (logbuffer, TYPE_JITHELPER | TYPE_RUNTIME);
1851 emit_byte (logbuffer, type);
1852 emit_ptr (logbuffer, buffer);
1853 emit_value (logbuffer, size);
1855 if (name) {
1856 memcpy (logbuffer->cursor, name, nlen);
1857 logbuffer->cursor += nlen;
1860 EXIT_LOG;
1863 static void
1864 throw_exc (MonoProfiler *prof, MonoObject *object)
1866 int do_bt = (!log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_THROW_BT : 0;
1867 FrameData data;
1869 if (do_bt)
1870 collect_bt (&data);
1872 ENTER_LOG (&exception_throws_ctr, logbuffer,
1873 EVENT_SIZE /* event */ +
1874 LEB128_SIZE /* object */ +
1875 (do_bt ? (
1876 LEB128_SIZE /* count */ +
1877 data.count * (
1878 LEB128_SIZE /* method */
1880 ) : 0)
1883 emit_event (logbuffer, do_bt | TYPE_EXCEPTION);
1884 emit_obj (logbuffer, object);
1886 if (do_bt)
1887 emit_bt (logbuffer, &data);
1889 EXIT_LOG;
1892 static void
1893 clause_exc (MonoProfiler *prof, MonoMethod *method, uint32_t clause_num, MonoExceptionEnum clause_type, MonoObject *exc)
1895 ENTER_LOG (&exception_clauses_ctr, logbuffer,
1896 EVENT_SIZE /* event */ +
1897 BYTE_SIZE /* clause type */ +
1898 LEB128_SIZE /* clause num */ +
1899 LEB128_SIZE /* method */
1902 emit_event (logbuffer, TYPE_EXCEPTION | TYPE_CLAUSE);
1903 emit_byte (logbuffer, clause_type);
1904 emit_value (logbuffer, clause_num);
1905 emit_method (logbuffer, method);
1906 emit_obj (logbuffer, exc);
1908 EXIT_LOG;
1911 static void
1912 monitor_event (MonoProfiler *profiler, MonoObject *object, MonoProfilerMonitorEvent ev)
1914 int do_bt = (!log_config.enter_leave && InterlockedRead (&log_profiler.runtime_inited) && log_config.num_frames) ? TYPE_MONITOR_BT : 0;
1915 FrameData data;
1917 if (do_bt)
1918 collect_bt (&data);
1920 ENTER_LOG (&monitor_events_ctr, logbuffer,
1921 EVENT_SIZE /* event */ +
1922 BYTE_SIZE /* ev */ +
1923 LEB128_SIZE /* object */ +
1924 (do_bt ? (
1925 LEB128_SIZE /* count */ +
1926 data.count * (
1927 LEB128_SIZE /* method */
1929 ) : 0)
1932 emit_event (logbuffer, do_bt | TYPE_MONITOR);
1933 emit_byte (logbuffer, ev);
1934 emit_obj (logbuffer, object);
1936 if (do_bt)
1937 emit_bt (logbuffer, &data);
1939 EXIT_LOG;
1942 static void
1943 monitor_contention (MonoProfiler *prof, MonoObject *object)
1945 monitor_event (prof, object, MONO_PROFILER_MONITOR_CONTENTION);
1948 static void
1949 monitor_acquired (MonoProfiler *prof, MonoObject *object)
1951 monitor_event (prof, object, MONO_PROFILER_MONITOR_DONE);
1954 static void
1955 monitor_failed (MonoProfiler *prof, MonoObject *object)
1957 monitor_event (prof, object, MONO_PROFILER_MONITOR_FAIL);
1960 static void
1961 thread_start (MonoProfiler *prof, uintptr_t tid)
1963 ENTER_LOG (&thread_starts_ctr, logbuffer,
1964 EVENT_SIZE /* event */ +
1965 BYTE_SIZE /* type */ +
1966 LEB128_SIZE /* tid */
1969 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
1970 emit_byte (logbuffer, TYPE_THREAD);
1971 emit_ptr (logbuffer, (void*) tid);
1973 EXIT_LOG;
1976 static void
1977 thread_end (MonoProfiler *prof, uintptr_t tid)
1979 ENTER_LOG (&thread_ends_ctr, logbuffer,
1980 EVENT_SIZE /* event */ +
1981 BYTE_SIZE /* type */ +
1982 LEB128_SIZE /* tid */
1985 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
1986 emit_byte (logbuffer, TYPE_THREAD);
1987 emit_ptr (logbuffer, (void*) tid);
1989 EXIT_LOG_EXPLICIT (NO_SEND);
1991 MonoProfilerThread *thread = get_thread ();
1993 thread->ended = TRUE;
1994 remove_thread (thread);
1996 PROF_TLS_SET (NULL);
1999 static void
2000 thread_name (MonoProfiler *prof, uintptr_t tid, const char *name)
2002 int len = strlen (name) + 1;
2004 ENTER_LOG (&thread_names_ctr, logbuffer,
2005 EVENT_SIZE /* event */ +
2006 BYTE_SIZE /* type */ +
2007 LEB128_SIZE /* tid */ +
2008 len /* name */
2011 emit_event (logbuffer, TYPE_METADATA);
2012 emit_byte (logbuffer, TYPE_THREAD);
2013 emit_ptr (logbuffer, (void*)tid);
2014 memcpy (logbuffer->cursor, name, len);
2015 logbuffer->cursor += len;
2017 EXIT_LOG;
2020 static void
2021 domain_loaded (MonoProfiler *prof, MonoDomain *domain)
2023 ENTER_LOG (&domain_loads_ctr, logbuffer,
2024 EVENT_SIZE /* event */ +
2025 BYTE_SIZE /* type */ +
2026 LEB128_SIZE /* domain id */
2029 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2030 emit_byte (logbuffer, TYPE_DOMAIN);
2031 emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2033 EXIT_LOG;
2036 static void
2037 domain_unloaded (MonoProfiler *prof, MonoDomain *domain)
2039 ENTER_LOG (&domain_unloads_ctr, logbuffer,
2040 EVENT_SIZE /* event */ +
2041 BYTE_SIZE /* type */ +
2042 LEB128_SIZE /* domain id */
2045 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2046 emit_byte (logbuffer, TYPE_DOMAIN);
2047 emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2049 EXIT_LOG;
2052 static void
2053 domain_name (MonoProfiler *prof, MonoDomain *domain, const char *name)
2055 int nlen = strlen (name) + 1;
2057 ENTER_LOG (&domain_names_ctr, logbuffer,
2058 EVENT_SIZE /* event */ +
2059 BYTE_SIZE /* type */ +
2060 LEB128_SIZE /* domain id */ +
2061 nlen /* name */
2064 emit_event (logbuffer, TYPE_METADATA);
2065 emit_byte (logbuffer, TYPE_DOMAIN);
2066 emit_ptr (logbuffer, (void*)(uintptr_t) mono_domain_get_id (domain));
2067 memcpy (logbuffer->cursor, name, nlen);
2068 logbuffer->cursor += nlen;
2070 EXIT_LOG;
2073 static void
2074 context_loaded (MonoProfiler *prof, MonoAppContext *context)
2076 ENTER_LOG (&context_loads_ctr, logbuffer,
2077 EVENT_SIZE /* event */ +
2078 BYTE_SIZE /* type */ +
2079 LEB128_SIZE /* context id */ +
2080 LEB128_SIZE /* domain id */
2083 emit_event (logbuffer, TYPE_END_LOAD | TYPE_METADATA);
2084 emit_byte (logbuffer, TYPE_CONTEXT);
2085 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2086 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2088 EXIT_LOG;
2091 static void
2092 context_unloaded (MonoProfiler *prof, MonoAppContext *context)
2094 ENTER_LOG (&context_unloads_ctr, logbuffer,
2095 EVENT_SIZE /* event */ +
2096 BYTE_SIZE /* type */ +
2097 LEB128_SIZE /* context id */ +
2098 LEB128_SIZE /* domain id */
2101 emit_event (logbuffer, TYPE_END_UNLOAD | TYPE_METADATA);
2102 emit_byte (logbuffer, TYPE_CONTEXT);
2103 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_id (context));
2104 emit_ptr (logbuffer, (void*)(uintptr_t) mono_context_get_domain_id (context));
2106 EXIT_LOG;
2109 typedef struct {
2110 MonoMethod *method;
2111 MonoDomain *domain;
2112 void *base_address;
2113 int offset;
2114 } AsyncFrameInfo;
2116 typedef struct {
2117 MonoLockFreeQueueNode node;
2118 uint64_t time;
2119 uintptr_t tid;
2120 const void *ip;
2121 int count;
2122 AsyncFrameInfo frames [MONO_ZERO_LEN_ARRAY];
2123 } SampleHit;
2125 static mono_bool
2126 async_walk_stack (MonoMethod *method, MonoDomain *domain, void *base_address, int offset, void *data)
2128 SampleHit *sample = (SampleHit *) data;
2130 if (sample->count < log_config.num_frames) {
2131 int i = sample->count;
2133 sample->frames [i].method = method;
2134 sample->frames [i].domain = domain;
2135 sample->frames [i].base_address = base_address;
2136 sample->frames [i].offset = offset;
2138 sample->count++;
2141 return sample->count == log_config.num_frames;
2144 #define SAMPLE_SLOT_SIZE(FRAMES) (sizeof (SampleHit) + sizeof (AsyncFrameInfo) * (FRAMES - MONO_ZERO_LEN_ARRAY))
2145 #define SAMPLE_BLOCK_SIZE (mono_pagesize ())
2147 static void
2148 enqueue_sample_hit (gpointer p)
2150 SampleHit *sample = p;
2152 mono_lock_free_queue_node_unpoison (&sample->node);
2153 mono_lock_free_queue_enqueue (&log_profiler.dumper_queue, &sample->node);
2154 mono_os_sem_post (&log_profiler.dumper_queue_sem);
2157 static void
2158 mono_sample_hit (MonoProfiler *profiler, const mono_byte *ip, const void *context)
2161 * Please note: We rely on the runtime loading the profiler with
2162 * MONO_DL_EAGER (RTLD_NOW) so that references to runtime functions within
2163 * this function (and its siblings) are resolved when the profiler is
2164 * loaded. Otherwise, we would potentially invoke the dynamic linker when
2165 * invoking runtime functions, which is not async-signal-safe.
2168 if (InterlockedRead (&log_profiler.in_shutdown))
2169 return;
2171 SampleHit *sample = (SampleHit *) mono_lock_free_queue_dequeue (&profiler->sample_reuse_queue);
2173 if (!sample) {
2175 * If we're out of reusable sample events and we're not allowed to
2176 * allocate more, we have no choice but to drop the event.
2178 if (InterlockedRead (&sample_allocations_ctr) >= log_config.max_allocated_sample_hits)
2179 return;
2181 sample = mono_lock_free_alloc (&profiler->sample_allocator);
2182 mono_lock_free_queue_node_init (&sample->node, TRUE);
2184 InterlockedIncrement (&sample_allocations_ctr);
2187 sample->count = 0;
2188 mono_stack_walk_async_safe (&async_walk_stack, (void *) context, sample);
2190 sample->time = current_time ();
2191 sample->tid = thread_id ();
2192 sample->ip = ip;
2194 mono_thread_hazardous_try_free (sample, enqueue_sample_hit);
2197 static uintptr_t *code_pages = 0;
2198 static int num_code_pages = 0;
2199 static int size_code_pages = 0;
2200 #define CPAGE_SHIFT (9)
2201 #define CPAGE_SIZE (1 << CPAGE_SHIFT)
2202 #define CPAGE_MASK (~(CPAGE_SIZE - 1))
2203 #define CPAGE_ADDR(p) ((p) & CPAGE_MASK)
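/*
 * Code pointers are tracked at 512-byte page granularity in a small
 * open-addressing hash table with linear probing. The low bit of a stored
 * page address doubles as a "symbols already dumped" flag (see
 * dump_unmanaged_coderefs); that's safe because CPAGE_ADDR masks it off
 * before comparisons.
 */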
2205 static uintptr_t
2206 add_code_page (uintptr_t *hash, uintptr_t hsize, uintptr_t page)
2208 uintptr_t i;
2209 uintptr_t start_pos;
2210 start_pos = (page >> CPAGE_SHIFT) % hsize;
2211 i = start_pos;
2212 do {
2213 if (hash [i] && CPAGE_ADDR (hash [i]) == CPAGE_ADDR (page)) {
2214 return 0;
2215 } else if (!hash [i]) {
2216 hash [i] = page;
2217 return 1;
2219 /* wrap around */
2220 if (++i == hsize)
2221 i = 0;
2222 } while (i != start_pos);
2223 g_assert_not_reached ();
2224 return 0;
2227 static void
2228 add_code_pointer (uintptr_t ip)
2230 uintptr_t i;
2231 if (num_code_pages * 2 >= size_code_pages) {
2232 uintptr_t *n;
2233 uintptr_t old_size = size_code_pages;
2234 size_code_pages *= 2;
2235 if (size_code_pages == 0)
2236 size_code_pages = 16;
2237 n = (uintptr_t *) g_calloc (sizeof (uintptr_t) * size_code_pages, 1);
2238 for (i = 0; i < old_size; ++i) {
2239 if (code_pages [i])
2240 add_code_page (n, size_code_pages, code_pages [i]);
2242 if (code_pages)
2243 g_free (code_pages);
2244 code_pages = n;
2246 num_code_pages += add_code_page (code_pages, size_code_pages, ip & CPAGE_MASK);
2249 /* ELF code crashes on some systems. */
2250 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2251 #if 0
2252 static void
2253 dump_ubin (const char *filename, uintptr_t load_addr, uint64_t offset, uintptr_t size)
2255 int len = strlen (filename) + 1;
2257 ENTER_LOG (&sample_ubins_ctr, logbuffer,
2258 EVENT_SIZE /* event */ +
2259 LEB128_SIZE /* load address */ +
2260 LEB128_SIZE /* offset */ +
2261 LEB128_SIZE /* size */ +
2262 len /* file name */
2265 emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_UBIN);
2266 emit_ptr (logbuffer, load_addr);
2267 emit_uvalue (logbuffer, offset);
2268 emit_uvalue (logbuffer, size);
2269 memcpy (logbuffer->cursor, filename, len);
2270 logbuffer->cursor += len;
2272 EXIT_LOG;
2274 #endif
2276 static void
2277 dump_usym (const char *name, uintptr_t value, uintptr_t size)
2279 int len = strlen (name) + 1;
2281 ENTER_LOG (&sample_usyms_ctr, logbuffer,
2282 EVENT_SIZE /* event */ +
2283 LEB128_SIZE /* value */ +
2284 LEB128_SIZE /* size */ +
2285 len /* name */
2288 emit_event (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_USYM);
2289 emit_ptr (logbuffer, (void*)value);
2290 emit_value (logbuffer, size);
2291 memcpy (logbuffer->cursor, name, len);
2292 logbuffer->cursor += len;
2294 EXIT_LOG;
2297 /* ELF code crashes on some systems. */
2298 //#if defined(ELFMAG0)
2299 #if 0
2301 #if SIZEOF_VOID_P == 4
2302 #define ELF_WSIZE 32
2303 #else
2304 #define ELF_WSIZE 64
2305 #endif
2306 #ifndef ElfW
2307 #define ElfW(type) _ElfW (Elf, ELF_WSIZE, type)
2308 #define _ElfW(e,w,t) _ElfW_1 (e, w, _##t)
2309 #define _ElfW_1(e,w,t) e##w##t
2310 #endif
2312 static void
2313 dump_elf_symbols (ElfW(Sym) *symbols, int num_symbols, const char *strtab, void *load_addr)
2315 int i;
2316 for (i = 0; i < num_symbols; ++i) {
2317 const char* sym;
2318 sym = strtab + symbols [i].st_name;
2319 if (!symbols [i].st_name || !symbols [i].st_size || (symbols [i].st_info & 0xf) != STT_FUNC)
2320 continue;
2321 dump_usym (sym, (uintptr_t)load_addr + symbols [i].st_value, symbols [i].st_size);
2325 static int
2326 read_elf_symbols (const char *filename, void *load_addr)
2328 int fd, i;
2329 void *data;
2330 struct stat statb;
2331 uint64_t file_size;
2332 ElfW(Ehdr) *header;
2333 ElfW(Shdr) *sheader;
2334 ElfW(Shdr) *shstrtabh;
2335 ElfW(Shdr) *symtabh = NULL;
2336 ElfW(Shdr) *strtabh = NULL;
2337 ElfW(Sym) *symbols = NULL;
2338 const char *strtab;
2339 int num_symbols;
2341 fd = open (filename, O_RDONLY);
2342 if (fd < 0)
2343 return 0;
2344 if (fstat (fd, &statb) != 0) {
2345 close (fd);
2346 return 0;
2348 file_size = statb.st_size;
2349 data = mmap (NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
2350 close (fd);
2351 if (data == MAP_FAILED)
2352 return 0;
2353 header = data;
2354 if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2355 header->e_ident [EI_MAG1] != ELFMAG1 ||
2356 header->e_ident [EI_MAG2] != ELFMAG2 ||
2357 header->e_ident [EI_MAG3] != ELFMAG3 ) {
2358 munmap (data, file_size);
2359 return 0;
2361 sheader = (void*)((char*)data + header->e_shoff);
2362 shstrtabh = (void*)((char*)sheader + (header->e_shentsize * header->e_shstrndx));
2363 strtab = (const char*)data + shstrtabh->sh_offset;
2364 for (i = 0; i < header->e_shnum; ++i) {
2365 if (sheader->sh_type == SHT_SYMTAB) {
2366 symtabh = sheader;
2367 strtabh = (void*)((char*)data + header->e_shoff + sheader->sh_link * header->e_shentsize);
2368 break;
2370 sheader = (void*)((char*)sheader + header->e_shentsize);
2372 if (!symtabh || !strtabh) {
2373 munmap (data, file_size);
2374 return 0;
2376 strtab = (const char*)data + strtabh->sh_offset;
2377 num_symbols = symtabh->sh_size / symtabh->sh_entsize;
2378 symbols = (void*)((char*)data + symtabh->sh_offset);
2379 dump_elf_symbols (symbols, num_symbols, strtab, load_addr);
2380 munmap (data, file_size);
2381 return 1;
2383 #endif
2385 /* ELF code crashes on some systems. */
2386 //#if defined(HAVE_DL_ITERATE_PHDR) && defined(ELFMAG0)
2387 #if 0
2388 static int
2389 elf_dl_callback (struct dl_phdr_info *info, size_t size, void *data)
2391 char buf [256];
2392 const char *filename;
2393 BinaryObject *obj;
2394 char *a = (void*)info->dlpi_addr;
2395 int i, num_sym;
2396 ElfW(Dyn) *dyn = NULL;
2397 ElfW(Sym) *symtab = NULL;
2398 ElfW(Word) *hash_table = NULL;
2399 ElfW(Ehdr) *header = NULL;
2400 const char* strtab = NULL;
2401 for (obj = log_profiler.binary_objects; obj; obj = obj->next) {
2402 if (obj->addr == a)
2403 return 0;
2405 filename = info->dlpi_name;
2406 if (!filename)
2407 return 0;
2408 if (!info->dlpi_addr && !filename [0]) {
2409 int l = readlink ("/proc/self/exe", buf, sizeof (buf) - 1);
2410 if (l > 0) {
2411 buf [l] = 0;
2412 filename = buf;
2415 obj = g_calloc (sizeof (BinaryObject), 1);
2416 obj->addr = (void*)info->dlpi_addr;
2417 obj->name = pstrdup (filename);
2418 obj->next = log_profiler.binary_objects;
2419 log_profiler.binary_objects = obj;
2420 a = NULL;
2421 for (i = 0; i < info->dlpi_phnum; ++i) {
2422 if (info->dlpi_phdr[i].p_type == PT_LOAD && !header) {
2423 header = (ElfW(Ehdr)*)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2424 if (header->e_ident [EI_MAG0] != ELFMAG0 ||
2425 header->e_ident [EI_MAG1] != ELFMAG1 ||
2426 header->e_ident [EI_MAG2] != ELFMAG2 ||
2427 header->e_ident [EI_MAG3] != ELFMAG3 ) {
2428 header = NULL;
2430 dump_ubin (filename, info->dlpi_addr + info->dlpi_phdr[i].p_vaddr, info->dlpi_phdr[i].p_offset, info->dlpi_phdr[i].p_memsz);
2431 } else if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) {
2432 dyn = (ElfW(Dyn) *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
2435 if (read_elf_symbols (filename, (void*)info->dlpi_addr))
2436 return 0;
2437 if (!info->dlpi_name || !info->dlpi_name[0])
2438 return 0;
2439 if (!dyn)
2440 return 0;
2441 for (i = 0; dyn [i].d_tag != DT_NULL; ++i) {
2442 if (dyn [i].d_tag == DT_SYMTAB) {
2443 symtab = (ElfW(Sym) *)(a + dyn [i].d_un.d_ptr);
2444 } else if (dyn [i].d_tag == DT_HASH) {
2445 hash_table = (ElfW(Word) *)(a + dyn [i].d_un.d_ptr);
2446 } else if (dyn [i].d_tag == DT_STRTAB) {
2447 strtab = (const char*)(a + dyn [i].d_un.d_ptr);
2450 if (!hash_table)
2451 return 0;
2452 num_sym = hash_table [1];
2453 dump_elf_symbols (symtab, num_sym, strtab, (void*)info->dlpi_addr);
2454 return 0;
2457 static int
2458 load_binaries (void)
2460 dl_iterate_phdr (elf_dl_callback, NULL);
2461 return 1;
2463 #else
2464 static int
2465 load_binaries (void)
2467 return 0;
2469 #endif
2471 static const char*
2472 symbol_for (uintptr_t code)
2474 #ifdef HAVE_DLADDR
2475 void *ip = (void*)code;
2476 Dl_info di;
2477 if (dladdr (ip, &di)) {
2478 if (di.dli_sname)
2479 return di.dli_sname;
2480 } else {
2481 /* char **names;
2482 names = backtrace_symbols (&ip, 1);
2483 if (names) {
2484 const char* p = names [0];
2485 g_free (names);
2486 return p;
2490 #endif
2491 return NULL;
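/*
 * Walk all recorded code pages and emit symbol events for them. Since we
 * only know that *some* instruction pointer fell within a page, we probe
 * the whole page at 16-byte intervals with dladdr (), emitting a symbol
 * only when it differs from the previous probe's. Pages already dumped
 * are skipped via their low tag bit.
 */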
2494 static void
2495 dump_unmanaged_coderefs (void)
2497 int i;
2498 const char* last_symbol;
2499 uintptr_t addr, page_end;
2501 if (load_binaries ())
2502 return;
2503 for (i = 0; i < size_code_pages; ++i) {
2504 const char* sym;
2505 if (!code_pages [i] || code_pages [i] & 1)
2506 continue;
2507 last_symbol = NULL;
2508 addr = CPAGE_ADDR (code_pages [i]);
2509 page_end = addr + CPAGE_SIZE;
2510 code_pages [i] |= 1;
2511 /* we dump the symbols for the whole page */
2512 for (; addr < page_end; addr += 16) {
2513 sym = symbol_for (addr);
2514 if (sym && sym == last_symbol)
2515 continue;
2516 last_symbol = sym;
2517 if (!sym)
2518 continue;
2519 dump_usym (sym, addr, 0); /* let's not guess the size */
2524 static void
2525 counters_add_agent (MonoCounter *counter)
2527 if (InterlockedRead (&log_profiler.in_shutdown))
2528 return;
2530 MonoCounterAgent *agent, *item;
2532 mono_os_mutex_lock (&log_profiler.counters_mutex);
2534 for (agent = log_profiler.counters; agent; agent = agent->next) {
2535 if (agent->counter == counter) {
2536 agent->value_size = 0;
2537 if (agent->value) {
2538 g_free (agent->value);
2539 agent->value = NULL;
2541 goto done;
2545 agent = (MonoCounterAgent *) g_malloc (sizeof (MonoCounterAgent));
2546 agent->counter = counter;
2547 agent->value = NULL;
2548 agent->value_size = 0;
2549 agent->index = log_profiler.counters_index++;
2550 agent->emitted = FALSE;
2551 agent->next = NULL;
2553 if (!log_profiler.counters) {
2554 log_profiler.counters = agent;
2555 } else {
2556 item = log_profiler.counters;
2557 while (item->next)
2558 item = item->next;
2559 item->next = agent;
2562 done:
2563 mono_os_mutex_unlock (&log_profiler.counters_mutex);
2566 static mono_bool
2567 counters_init_foreach_callback (MonoCounter *counter, gpointer data)
2569 counters_add_agent (counter);
2570 return TRUE;
2573 static void
2574 counters_init (void)
2576 mono_os_mutex_init (&log_profiler.counters_mutex);
2578 log_profiler.counters_index = 1;
2580 mono_counters_on_register (&counters_add_agent);
2581 mono_counters_foreach (counters_init_foreach_callback, NULL);
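/*
 * Counter descriptors are emitted with the usual two-pass pattern: a first
 * pass over the agent list computes the event size (ENTER_LOG needs the
 * byte count up front to reserve buffer space), and a second pass writes
 * the actual data.
 */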
2584 static void
2585 counters_emit (void)
2587 MonoCounterAgent *agent;
2588 int len = 0;
2589 int size =
2590 EVENT_SIZE /* event */ +
2591 LEB128_SIZE /* len */
2594 mono_os_mutex_lock (&log_profiler.counters_mutex);
2596 for (agent = log_profiler.counters; agent; agent = agent->next) {
2597 if (agent->emitted)
2598 continue;
2600 size +=
2601 LEB128_SIZE /* section */ +
2602 strlen (mono_counter_get_name (agent->counter)) + 1 /* name */ +
2603 BYTE_SIZE /* type */ +
2604 BYTE_SIZE /* unit */ +
2605 BYTE_SIZE /* variance */ +
2606 LEB128_SIZE /* index */
2609 len++;
2612 if (!len)
2613 goto done;
2615 ENTER_LOG (&counter_descriptors_ctr, logbuffer, size);
2617 emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
2618 emit_value (logbuffer, len);
2620 for (agent = log_profiler.counters; agent; agent = agent->next) {
2621 const char *name;
2623 if (agent->emitted)
2624 continue;
2626 name = mono_counter_get_name (agent->counter);
2627 emit_value (logbuffer, mono_counter_get_section (agent->counter));
2628 emit_string (logbuffer, name, strlen (name) + 1);
2629 emit_byte (logbuffer, mono_counter_get_type (agent->counter));
2630 emit_byte (logbuffer, mono_counter_get_unit (agent->counter));
2631 emit_byte (logbuffer, mono_counter_get_variance (agent->counter));
2632 emit_value (logbuffer, agent->index);
2634 agent->emitted = TRUE;
2637 EXIT_LOG;
2639 done:
2640 mono_os_mutex_unlock (&log_profiler.counters_mutex);
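/*
 * Integer-typed counters are emitted as signed/unsigned deltas against the
 * previously sampled value cached in agent->value; doubles and strings are
 * emitted verbatim. Counters whose value hasn't changed since the last
 * sample are skipped entirely to keep the event small.
 */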
2643 static void
2644 counters_sample (uint64_t timestamp)
2646 MonoCounterAgent *agent;
2647 MonoCounter *counter;
2648 int type;
2649 int buffer_size;
2650 void *buffer;
2651 int size;
2653 counters_emit ();
2655 buffer_size = 8;
2656 buffer = g_calloc (1, buffer_size);
2658 mono_os_mutex_lock (&log_profiler.counters_mutex);
2660 size =
2661 EVENT_SIZE /* event */
2664 for (agent = log_profiler.counters; agent; agent = agent->next) {
2665 size +=
2666 LEB128_SIZE /* index */ +
2667 BYTE_SIZE /* type */ +
2668 mono_counter_get_size (agent->counter) /* value */
2672 size +=
2673 LEB128_SIZE /* stop marker */
2676 ENTER_LOG (&counter_samples_ctr, logbuffer, size);
2678 emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
2680 for (agent = log_profiler.counters; agent; agent = agent->next) {
2681 size_t size;
2683 counter = agent->counter;
2685 size = mono_counter_get_size (counter);
2687 if (size > buffer_size) {
2688 buffer_size = size;
2689 buffer = g_realloc (buffer, buffer_size);
2692 memset (buffer, 0, buffer_size);
2694 g_assert (mono_counters_sample (counter, buffer, size));
2696 type = mono_counter_get_type (counter);
2698 if (!agent->value) {
2699 agent->value = g_calloc (1, size);
2700 agent->value_size = size;
2701 } else {
2702 if (type == MONO_COUNTER_STRING) {
2703 if (strcmp (agent->value, buffer) == 0)
2704 continue;
2705 } else {
2706 if (agent->value_size == size && memcmp (agent->value, buffer, size) == 0)
2707 continue;
2711 emit_uvalue (logbuffer, agent->index);
2712 emit_byte (logbuffer, type);
2713 switch (type) {
2714 case MONO_COUNTER_INT:
2715 #if SIZEOF_VOID_P == 4
2716 case MONO_COUNTER_WORD:
2717 #endif
2718 emit_svalue (logbuffer, *(int*)buffer - *(int*)agent->value);
2719 break;
2720 case MONO_COUNTER_UINT:
2721 emit_uvalue (logbuffer, *(guint*)buffer - *(guint*)agent->value);
2722 break;
2723 case MONO_COUNTER_TIME_INTERVAL:
2724 case MONO_COUNTER_LONG:
2725 #if SIZEOF_VOID_P == 8
2726 case MONO_COUNTER_WORD:
2727 #endif
2728 emit_svalue (logbuffer, *(gint64*)buffer - *(gint64*)agent->value);
2729 break;
2730 case MONO_COUNTER_ULONG:
2731 emit_uvalue (logbuffer, *(guint64*)buffer - *(guint64*)agent->value);
2732 break;
2733 case MONO_COUNTER_DOUBLE:
2734 emit_double (logbuffer, *(double*)buffer);
2735 break;
2736 case MONO_COUNTER_STRING:
2737 if (size == 0) {
2738 emit_byte (logbuffer, 0);
2739 } else {
2740 emit_byte (logbuffer, 1);
2741 emit_string (logbuffer, (char*)buffer, size);
2743 break;
2744 default:
2745 g_assert_not_reached ();
2748 if (type == MONO_COUNTER_STRING && size > agent->value_size) {
2749 agent->value = g_realloc (agent->value, size);
2750 agent->value_size = size;
2753 if (size > 0)
2754 memcpy (agent->value, buffer, size);
2756 g_free (buffer);
2758 emit_value (logbuffer, 0);
2760 EXIT_LOG;
2762 mono_os_mutex_unlock (&log_profiler.counters_mutex);
2765 static void
2766 perfcounters_emit (void)
2768 PerfCounterAgent *pcagent;
2769 int len = 0;
2770 int size =
2771 EVENT_SIZE /* event */ +
2772 LEB128_SIZE /* len */
2775 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2776 if (pcagent->emitted)
2777 continue;
2779 size +=
2780 LEB128_SIZE /* section */ +
2781 strlen (pcagent->category_name) + 1 /* category name */ +
2782 strlen (pcagent->name) + 1 /* name */ +
2783 BYTE_SIZE /* type */ +
2784 BYTE_SIZE /* unit */ +
2785 BYTE_SIZE /* variance */ +
2786 LEB128_SIZE /* index */
2789 len++;
2792 if (!len)
2793 return;
2795 ENTER_LOG (&perfcounter_descriptors_ctr, logbuffer, size);
2797 emit_event (logbuffer, TYPE_SAMPLE_COUNTERS_DESC | TYPE_SAMPLE);
2798 emit_value (logbuffer, len);
2800 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2801 if (pcagent->emitted)
2802 continue;
2804 emit_value (logbuffer, MONO_COUNTER_PERFCOUNTERS);
2805 emit_string (logbuffer, pcagent->category_name, strlen (pcagent->category_name) + 1);
2806 emit_string (logbuffer, pcagent->name, strlen (pcagent->name) + 1);
2807 emit_byte (logbuffer, MONO_COUNTER_LONG);
2808 emit_byte (logbuffer, MONO_COUNTER_RAW);
2809 emit_byte (logbuffer, MONO_COUNTER_VARIABLE);
2810 emit_value (logbuffer, pcagent->index);
2812 pcagent->emitted = TRUE;
2815 EXIT_LOG;
2818 static gboolean
2819 perfcounters_foreach (char *category_name, char *name, unsigned char type, gint64 value, gpointer user_data)
2821 PerfCounterAgent *pcagent;
2823 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2824 if (strcmp (pcagent->category_name, category_name) != 0 || strcmp (pcagent->name, name) != 0)
2825 continue;
2826 if (pcagent->value == value)
2827 return TRUE;
2829 pcagent->value = value;
2830 pcagent->updated = TRUE;
2831 pcagent->deleted = FALSE;
2832 return TRUE;
2835 pcagent = g_new0 (PerfCounterAgent, 1);
2836 pcagent->next = log_profiler.perfcounters;
2837 pcagent->index = log_profiler.counters_index++;
2838 pcagent->category_name = g_strdup (category_name);
2839 pcagent->name = g_strdup (name);
2840 pcagent->value = value;
2841 pcagent->emitted = FALSE;
2842 pcagent->updated = TRUE;
2843 pcagent->deleted = FALSE;
2845 log_profiler.perfcounters = pcagent;
2847 return TRUE;
2850 static void
2851 perfcounters_sample (uint64_t timestamp)
2853 PerfCounterAgent *pcagent;
2854 int len = 0;
2855 int size;
2857 mono_os_mutex_lock (&log_profiler.counters_mutex);
2859 /* mark all perfcounters as deleted, foreach will unmark them as necessary */
2860 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next)
2861 pcagent->deleted = TRUE;
2863 mono_perfcounter_foreach (perfcounters_foreach, NULL);
2865 perfcounters_emit ();
2867 size =
2868 EVENT_SIZE /* event */
2871 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2872 if (pcagent->deleted || !pcagent->updated)
2873 continue;
2875 size +=
2876 LEB128_SIZE /* index */ +
2877 BYTE_SIZE /* type */ +
2878 LEB128_SIZE /* value */
2881 len++;
2884 if (!len)
2885 goto done;
2887 size +=
2888 LEB128_SIZE /* stop marker */
2891 ENTER_LOG (&perfcounter_samples_ctr, logbuffer, size);
2893 emit_event_time (logbuffer, TYPE_SAMPLE_COUNTERS | TYPE_SAMPLE, timestamp);
2895 for (pcagent = log_profiler.perfcounters; pcagent; pcagent = pcagent->next) {
2896 if (pcagent->deleted || !pcagent->updated)
2897 continue;
2898 emit_uvalue (logbuffer, pcagent->index);
2899 emit_byte (logbuffer, MONO_COUNTER_LONG);
2900 emit_svalue (logbuffer, pcagent->value);
2902 pcagent->updated = FALSE;
2905 emit_value (logbuffer, 0);
2907 EXIT_LOG;
2909 done:
2910 mono_os_mutex_unlock (&log_profiler.counters_mutex);
2913 static void
2914 counters_and_perfcounters_sample (void)
2916 uint64_t now = current_time ();
2918 counters_sample (now);
2919 perfcounters_sample (now);
2922 typedef struct {
2923 MonoLockFreeQueueNode node;
2924 MonoMethod *method;
2925 } MethodNode;
2927 typedef struct {
2928 int offset;
2929 int counter;
2930 char *filename;
2931 int line;
2932 int column;
2933 } CoverageEntry;
2935 static void
2936 free_coverage_entry (gpointer data, gpointer userdata)
2938 CoverageEntry *entry = (CoverageEntry *)data;
2939 g_free (entry->filename);
2940 g_free (entry);
2943 static void
2944 obtain_coverage_for_method (MonoProfiler *prof, const MonoProfilerCoverageData *entry)
2946 int offset = entry->il_offset - log_profiler.coverage_previous_offset;
2947 CoverageEntry *e = g_new (CoverageEntry, 1);
2949 log_profiler.coverage_previous_offset = entry->il_offset;
2951 e->offset = offset;
2952 e->counter = entry->counter;
2953 e->filename = g_strdup(entry->file_name ? entry->file_name : "");
2954 e->line = entry->line;
2955 e->column = entry->column;
2957 g_ptr_array_add (log_profiler.coverage_data, e);
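/*
 * Rewrites runtime generic notation into metadata-style arity names: an
 * input like "Dictionary<TKey,TValue>" becomes "Dictionary`2" (illustrative
 * input; the type arguments are dropped and only their count is kept). An
 * empty "<>" is emitted as "&lt;&gt;", presumably so downstream XML
 * consumers of the coverage data don't need to escape it.
 */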
2960 static char *
2961 parse_generic_type_names(char *name)
2963 char *new_name, *ret;
2964 int within_generic_declaration = 0, generic_members = 1;
2966 if (name == NULL || *name == '\0')
2967 return g_strdup ("");
2969 if (!(ret = new_name = (char *) g_calloc (strlen (name) * 4 + 1, sizeof (char))))
2970 return NULL;
2972 do {
2973 switch (*name) {
2974 case '<':
2975 within_generic_declaration = 1;
2976 break;
2978 case '>':
2979 within_generic_declaration = 0;
2981 if (*(name - 1) != '<') {
2982 *new_name++ = '`';
2983 *new_name++ = '0' + generic_members;
2984 } else {
2985 memcpy (new_name, "&lt;&gt;", 8);
2986 new_name += 8;
2989 generic_members = 0;
2990 break;
2992 case ',':
2993 generic_members++;
2994 break;
2996 default:
2997 if (!within_generic_declaration)
2998 *new_name++ = *name;
3000 break;
3002 } while (*name++);
3004 return ret;
3007 static void
3008 build_method_buffer (gpointer key, gpointer value, gpointer userdata)
3010 MonoMethod *method = (MonoMethod *)value;
3011 MonoClass *klass;
3012 MonoImage *image;
3013 char *class_name;
3014 const char *image_name, *method_name, *sig, *first_filename;
3015 guint i;
3017 log_profiler.coverage_previous_offset = 0;
3018 log_profiler.coverage_data = g_ptr_array_new ();
3020 mono_profiler_get_coverage_data (log_profiler.handle, method, obtain_coverage_for_method);
3022 klass = mono_method_get_class (method);
3023 image = mono_class_get_image (klass);
3024 image_name = mono_image_get_name (image);
3026 sig = mono_signature_get_desc (mono_method_signature (method), TRUE);
3027 class_name = parse_generic_type_names (mono_type_get_name (mono_class_get_type (klass)));
3028 method_name = mono_method_get_name (method);
3030 if (log_profiler.coverage_data->len != 0) {
3031 CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata[0];
3032 first_filename = entry->filename ? entry->filename : "";
3033 } else
3034 first_filename = "";
3036 image_name = image_name ? image_name : "";
3037 sig = sig ? sig : "";
3038 method_name = method_name ? method_name : "";
3040 ENTER_LOG (&coverage_methods_ctr, logbuffer,
3041 EVENT_SIZE /* event */ +
3042 strlen (image_name) + 1 /* image name */ +
3043 strlen (class_name) + 1 /* class name */ +
3044 strlen (method_name) + 1 /* method name */ +
3045 strlen (sig) + 1 /* signature */ +
3046 strlen (first_filename) + 1 /* first file name */ +
3047 LEB128_SIZE /* token */ +
3048 LEB128_SIZE /* method id */ +
3049 LEB128_SIZE /* entries */
3052 emit_event (logbuffer, TYPE_COVERAGE_METHOD | TYPE_COVERAGE);
3053 emit_string (logbuffer, image_name, strlen (image_name) + 1);
3054 emit_string (logbuffer, class_name, strlen (class_name) + 1);
3055 emit_string (logbuffer, method_name, strlen (method_name) + 1);
3056 emit_string (logbuffer, sig, strlen (sig) + 1);
3057 emit_string (logbuffer, first_filename, strlen (first_filename) + 1);
3059 emit_uvalue (logbuffer, mono_method_get_token (method));
3060 emit_uvalue (logbuffer, log_profiler.coverage_method_id);
3061 emit_value (logbuffer, log_profiler.coverage_data->len);
3063 EXIT_LOG;
3065 for (i = 0; i < log_profiler.coverage_data->len; i++) {
3066 CoverageEntry *entry = (CoverageEntry *)log_profiler.coverage_data->pdata[i];
3068 ENTER_LOG (&coverage_statements_ctr, logbuffer,
3069 EVENT_SIZE /* event */ +
3070 LEB128_SIZE /* method id */ +
3071 LEB128_SIZE /* offset */ +
3072 LEB128_SIZE /* counter */ +
3073 LEB128_SIZE /* line */ +
3074 LEB128_SIZE /* column */
3077 emit_event (logbuffer, TYPE_COVERAGE_STATEMENT | TYPE_COVERAGE);
3078 emit_uvalue (logbuffer, log_profiler.coverage_method_id);
3079 emit_uvalue (logbuffer, entry->offset);
3080 emit_uvalue (logbuffer, entry->counter);
3081 emit_uvalue (logbuffer, entry->line);
3082 emit_uvalue (logbuffer, entry->column);
3084 EXIT_LOG;
3087 log_profiler.coverage_method_id++;
3089 g_free (class_name);
3091 g_ptr_array_foreach (log_profiler.coverage_data, free_coverage_entry, NULL);
3092 g_ptr_array_free (log_profiler.coverage_data, TRUE);
3095 /* Counts the nodes in the queue; note that it drains and frees them as a side effect. */
3096 static guint
3097 count_queue (MonoLockFreeQueue *queue)
3099 MonoLockFreeQueueNode *node;
3100 guint count = 0;
3102 while ((node = mono_lock_free_queue_dequeue (queue))) {
3103 count++;
3104 mono_thread_hazardous_try_free (node, g_free);
3107 return count;
3110 static void
3111 build_class_buffer (gpointer key, gpointer value, gpointer userdata)
3113 MonoClass *klass = (MonoClass *)key;
3114 MonoLockFreeQueue *class_methods = (MonoLockFreeQueue *)value;
3115 MonoImage *image;
3116 char *class_name;
3117 const char *assembly_name;
3118 int number_of_methods, partially_covered;
3119 guint fully_covered;
3121 image = mono_class_get_image (klass);
3122 assembly_name = mono_image_get_name (image);
3123 class_name = mono_type_get_name (mono_class_get_type (klass));
3125 assembly_name = assembly_name ? assembly_name : "";
3126 number_of_methods = mono_class_num_methods (klass);
3127 fully_covered = count_queue (class_methods);
3128 /* We don't handle partially covered yet */
3129 partially_covered = 0;
3131 ENTER_LOG (&coverage_classes_ctr, logbuffer,
3132 EVENT_SIZE /* event */ +
3133 strlen (assembly_name) + 1 /* assembly name */ +
3134 strlen (class_name) + 1 /* class name */ +
3135 LEB128_SIZE /* no. methods */ +
3136 LEB128_SIZE /* fully covered */ +
3137 LEB128_SIZE /* partially covered */
3140 emit_event (logbuffer, TYPE_COVERAGE_CLASS | TYPE_COVERAGE);
3141 emit_string (logbuffer, assembly_name, strlen (assembly_name) + 1);
3142 emit_string (logbuffer, class_name, strlen (class_name) + 1);
3143 emit_uvalue (logbuffer, number_of_methods);
3144 emit_uvalue (logbuffer, fully_covered);
3145 emit_uvalue (logbuffer, partially_covered);
3147 EXIT_LOG;
3149 g_free (class_name);
3152 static void
3153 get_coverage_for_image (MonoImage *image, int *number_of_methods, guint *fully_covered, int *partially_covered)
3155 MonoLockFreeQueue *image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);
3157 *number_of_methods = mono_image_get_table_rows (image, MONO_TABLE_METHOD);
3158 if (image_methods)
3159 *fully_covered = count_queue (image_methods);
3160 else
3161 *fully_covered = 0;
3163 // FIXME: We don't handle partially covered yet.
3164 *partially_covered = 0;
3167 static void
3168 build_assembly_buffer (gpointer key, gpointer value, gpointer userdata)
3170 MonoAssembly *assembly = (MonoAssembly *)value;
3171 MonoImage *image = mono_assembly_get_image (assembly);
3172 const char *name, *guid, *filename;
3173 int number_of_methods = 0, partially_covered = 0;
3174 guint fully_covered = 0;
3176 name = mono_image_get_name (image);
3177 guid = mono_image_get_guid (image);
3178 filename = mono_image_get_filename (image);
3180 name = name ? name : "";
3181 guid = guid ? guid : "";
3182 filename = filename ? filename : "";
3184 get_coverage_for_image (image, &number_of_methods, &fully_covered, &partially_covered);
3186 ENTER_LOG (&coverage_assemblies_ctr, logbuffer,
3187 EVENT_SIZE /* event */ +
3188 strlen (name) + 1 /* name */ +
3189 strlen (guid) + 1 /* guid */ +
3190 strlen (filename) + 1 /* file name */ +
3191 LEB128_SIZE /* no. methods */ +
3192 LEB128_SIZE /* fully covered */ +
3193 LEB128_SIZE /* partially covered */
3196 emit_event (logbuffer, TYPE_COVERAGE_ASSEMBLY | TYPE_COVERAGE);
3197 emit_string (logbuffer, name, strlen (name) + 1);
3198 emit_string (logbuffer, guid, strlen (guid) + 1);
3199 emit_string (logbuffer, filename, strlen (filename) + 1);
3200 emit_uvalue (logbuffer, number_of_methods);
3201 emit_uvalue (logbuffer, fully_covered);
3202 emit_uvalue (logbuffer, partially_covered);
3204 EXIT_LOG;
3207 static void
3208 dump_coverage (void)
3210 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3211 mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, build_assembly_buffer, NULL);
3212 mono_conc_hashtable_foreach (log_profiler.coverage_classes, build_class_buffer, NULL);
3213 mono_conc_hashtable_foreach (log_profiler.coverage_methods, build_method_buffer, NULL);
3214 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3217 static MonoLockFreeQueueNode *
3218 create_method_node (MonoMethod *method)
3220 MethodNode *node = (MethodNode *) g_malloc (sizeof (MethodNode));
3221 mono_lock_free_queue_node_init ((MonoLockFreeQueueNode *) node, FALSE);
3222 node->method = method;
3224 return (MonoLockFreeQueueNode *) node;
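/*
 * Decides whether a method should be instrumented for coverage. Filters
 * are matched by substring against "[image]Namespace.Type"; entries
 * starting with '+' are positive filters (if any are present, at least one
 * must match), and entries starting with '-' exclude. For example, a filter
 * file containing "+[mscorlib]" and "-System.Collections" would cover only
 * mscorlib, minus that namespace (illustrative filters, not defaults).
 */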
3227 static gboolean
3228 coverage_filter (MonoProfiler *prof, MonoMethod *method)
3230 MonoError error;
3231 MonoClass *klass;
3232 MonoImage *image;
3233 MonoAssembly *assembly;
3234 MonoMethodHeader *header;
3235 guint32 iflags, flags, code_size;
3236 char *fqn, *classname;
3237 gboolean has_positive, found;
3238 MonoLockFreeQueue *image_methods, *class_methods;
3239 MonoLockFreeQueueNode *node;
3241 flags = mono_method_get_flags (method, &iflags);
3242 if ((iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3243 (flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
3244 return FALSE;
3246 // Don't need to do anything else if we're already tracking this method
3247 if (mono_conc_hashtable_lookup (log_profiler.coverage_methods, method))
3248 return TRUE;
3250 klass = mono_method_get_class (method);
3251 image = mono_class_get_image (klass);
3253 // Don't handle coverage for the core assemblies
3254 if (mono_conc_hashtable_lookup (log_profiler.coverage_suppressed_assemblies, (gpointer) mono_image_get_name (image)) != NULL)
3255 return FALSE;
3257 if (prof->coverage_filters) {
3258 /* Check already filtered classes first */
3259 if (mono_conc_hashtable_lookup (log_profiler.coverage_filtered_classes, klass))
3260 return FALSE;
3262 classname = mono_type_get_name (mono_class_get_type (klass));
3264 fqn = g_strdup_printf ("[%s]%s", mono_image_get_name (image), classname);
3266 // Check positive filters first
3267 has_positive = FALSE;
3268 found = FALSE;
3269 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3270 char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3272 if (filter [0] == '+') {
3273 filter = &filter [1];
3275 if (strstr (fqn, filter) != NULL)
3276 found = TRUE;
3278 has_positive = TRUE;
3282 if (has_positive && !found) {
3283 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3284 mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
3285 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3286 g_free (fqn);
3287 g_free (classname);
3289 return FALSE;
3292 for (guint i = 0; i < prof->coverage_filters->len; ++i) {
3293 // FIXME: Is substring search sufficient?
3294 char *filter = (char *)g_ptr_array_index (prof->coverage_filters, i);
3295 if (filter [0] == '+')
3296 continue;
3298 // Skip '-'
3299 filter = &filter [1];
3301 if (strstr (fqn, filter) != NULL) {
3302 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3303 mono_conc_hashtable_insert (log_profiler.coverage_filtered_classes, klass, klass);
3304 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3305 g_free (fqn);
3306 g_free (classname);
3308 return FALSE;
3312 g_free (fqn);
3313 g_free (classname);
3316 header = mono_method_get_header_checked (method, &error);
3317 mono_error_cleanup (&error);
if (!header)
return FALSE; /* No IL body (e.g. runtime stubs), so there is nothing to instrument. */
3319 mono_method_header_get_code (header, &code_size, NULL);
3321 assembly = mono_image_get_assembly (image);
3323 // Need to keep the assemblies around for as long as they are kept in the hashtable
3324 // NUnit, for example, has a habit of unloading them before the coverage statistics are
3325 // generated, causing a crash. See https://bugzilla.xamarin.com/show_bug.cgi?id=39325
3326 mono_assembly_addref (assembly);
3328 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3329 mono_conc_hashtable_insert (log_profiler.coverage_methods, method, method);
3330 mono_conc_hashtable_insert (log_profiler.coverage_assemblies, assembly, assembly);
3331 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3333 image_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_image_to_methods, image);
3335 if (image_methods == NULL) {
3336 image_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3337 mono_lock_free_queue_init (image_methods);
3338 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3339 mono_conc_hashtable_insert (log_profiler.coverage_image_to_methods, image, image_methods);
3340 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3343 node = create_method_node (method);
3344 mono_lock_free_queue_enqueue (image_methods, node);
3346 class_methods = (MonoLockFreeQueue *)mono_conc_hashtable_lookup (log_profiler.coverage_classes, klass);
3348 if (class_methods == NULL) {
3349 class_methods = (MonoLockFreeQueue *) g_malloc (sizeof (MonoLockFreeQueue));
3350 mono_lock_free_queue_init (class_methods);
3351 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3352 mono_conc_hashtable_insert (log_profiler.coverage_classes, klass, class_methods);
3353 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3356 node = create_method_node (method);
3357 mono_lock_free_queue_enqueue (class_methods, node);
3359 return TRUE;
3362 #define LINE_BUFFER_SIZE 4096
3363 /* Max file limit of 128KB */
3364 #define MAX_FILE_SIZE (128 * 1024)
3365 static char *
3366 get_file_content (FILE *stream)
3368 char *buffer;
3369 ssize_t bytes_read;
3370 long filesize;
3371 int res, offset = 0;
3373 res = fseek (stream, 0, SEEK_END);
3374 if (res < 0)
3375 return NULL;
3377 filesize = ftell (stream);
3378 if (filesize < 0)
3379 return NULL;
3381 res = fseek (stream, 0, SEEK_SET);
3382 if (res < 0)
3383 return NULL;
3385 if (filesize > MAX_FILE_SIZE)
3386 return NULL;
3388 buffer = (char *) g_malloc ((filesize + 1) * sizeof (char));
3389 while ((bytes_read = fread (buffer + offset, 1, LINE_BUFFER_SIZE, stream)) > 0)
3390 offset += bytes_read;
3392 /* NULL terminate our buffer */
3393 buffer[filesize] = '\0';
3394 return buffer;
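/*
 * Tokenizes the buffer in place: each '\n' is overwritten with a NUL and
 * *next_start points past it, so returned lines alias the original buffer
 * and must not be freed individually.
 */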
3397 static char *
3398 get_next_line (char *contents, char **next_start)
3400 char *p = contents;
3402 if (p == NULL || *p == '\0') {
3403 *next_start = NULL;
3404 return NULL;
3407 while (*p != '\n' && *p != '\0')
3408 p++;
3410 if (*p == '\n') {
3411 *p = '\0';
3412 *next_start = p + 1;
3413 } else
3414 *next_start = NULL;
3416 return contents;
3419 static void
3420 init_suppressed_assemblies (void)
3422 char *content;
3423 char *line;
3424 FILE *sa_file;
3426 log_profiler.coverage_suppressed_assemblies = mono_conc_hashtable_new (g_str_hash, g_str_equal);
3427 sa_file = fopen (SUPPRESSION_DIR "/mono-profiler-log.suppression", "r");
3428 if (sa_file == NULL)
3429 return;
3431 /* Don't need to free @content as it is referred to by the lines stored in @suppressed_assemblies */
3432 content = get_file_content (sa_file);
3433 if (content == NULL)
3434 g_error ("Could not read mono-profiler-log.suppression (it may exceed the 128 KB limit) - aborting.");
3436 while ((line = get_next_line (content, &content))) {
3437 line = g_strchomp (g_strchug (line));
3438 /* No locking needed as we're doing initialization */
3439 mono_conc_hashtable_insert (log_profiler.coverage_suppressed_assemblies, line, line);
3442 fclose (sa_file);
3445 static void
3446 parse_cov_filter_file (GPtrArray *filters, const char *file)
3448 FILE *filter_file = fopen (file, "r");
3450 if (filter_file == NULL) {
3451 mono_profiler_printf_err ("Could not open coverage filter file '%s'.", file);
3452 return;
3455 /* Don't need to free content as it is referred to by the lines stored in @filters */
3456 char *content = get_file_content (filter_file);
3458 if (content == NULL)
3459 mono_profiler_printf_err ("Coverage filter file '%s' is larger than 128 KB - ignoring.", file);
3461 char *line;
3463 while ((line = get_next_line (content, &content)))
3464 g_ptr_array_add (filters, g_strchug (g_strchomp (line)));
3466 fclose (filter_file);
3469 static void
3470 coverage_init (void)
3472 mono_os_mutex_init (&log_profiler.coverage_mutex);
3473 log_profiler.coverage_methods = mono_conc_hashtable_new (NULL, NULL);
3474 log_profiler.coverage_assemblies = mono_conc_hashtable_new (NULL, NULL);
3475 log_profiler.coverage_classes = mono_conc_hashtable_new (NULL, NULL);
3476 log_profiler.coverage_filtered_classes = mono_conc_hashtable_new (NULL, NULL);
3477 log_profiler.coverage_image_to_methods = mono_conc_hashtable_new (NULL, NULL);
3478 init_suppressed_assemblies ();
3481 static void
3482 unref_coverage_assemblies (gpointer key, gpointer value, gpointer userdata)
3484 MonoAssembly *assembly = (MonoAssembly *)value;
3485 mono_assembly_close (assembly);
3488 static void
3489 free_sample_hit (gpointer p)
3491 mono_lock_free_free (p, SAMPLE_BLOCK_SIZE);
3494 static void
3495 cleanup_reusable_samples (void)
3497 SampleHit *sample;
3499 while ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.sample_reuse_queue)))
3500 mono_thread_hazardous_try_free (sample, free_sample_hit);
3503 static void
3504 log_early_shutdown (MonoProfiler *prof)
3506 if (log_config.hs_on_shutdown) {
3507 InterlockedWrite (&log_profiler.heapshot_requested, 1);
3508 mono_gc_collect (mono_gc_max_generation ());
3512 static void
3513 log_shutdown (MonoProfiler *prof)
3515 InterlockedWrite (&log_profiler.in_shutdown, 1);
3517 if (ENABLED (PROFLOG_COUNTER_EVENTS))
3518 counters_and_perfcounters_sample ();
3520 if (log_config.collect_coverage)
3521 dump_coverage ();
3523 char c = 1;
3525 if (write (prof->pipes [1], &c, 1) != 1) {
3526 mono_profiler_printf_err ("Could not write to log profiler pipe: %s", g_strerror (errno));
3527 exit (1);
3530 mono_native_thread_join (prof->helper_thread);
3532 mono_os_mutex_destroy (&log_profiler.counters_mutex);
3534 MonoCounterAgent *mc_next;
3536 for (MonoCounterAgent *cur = log_profiler.counters; cur; cur = mc_next) {
3537 mc_next = cur->next;
3538 g_free (cur);
3541 PerfCounterAgent *pc_next;
3543 for (PerfCounterAgent *cur = log_profiler.perfcounters; cur; cur = pc_next) {
3544 pc_next = cur->next;
3545 g_free (cur);
3549 * Ensure that we empty the LLS completely, even if some nodes are
3550 * not immediately removed upon calling mono_lls_remove (), by
3551 * iterating until the head is NULL.
3553 while (log_profiler.profiler_thread_list.head) {
3554 MONO_LLS_FOREACH_SAFE (&log_profiler.profiler_thread_list, MonoProfilerThread, thread) {
3555 g_assert (thread->attached && "Why is a thread in the LLS not attached?");
3557 remove_thread (thread);
3558 } MONO_LLS_FOREACH_SAFE_END
3562 * Ensure that all threads have been freed, so that we don't miss any
3563 * buffers when we shut down the writer thread below.
3565 mono_thread_hazardous_try_free_all ();
3567 InterlockedWrite (&prof->run_dumper_thread, 0);
3568 mono_os_sem_post (&prof->dumper_queue_sem);
3569 mono_native_thread_join (prof->dumper_thread);
3570 mono_os_sem_destroy (&prof->dumper_queue_sem);
3572 InterlockedWrite (&prof->run_writer_thread, 0);
3573 mono_os_sem_post (&prof->writer_queue_sem);
3574 mono_native_thread_join (prof->writer_thread);
3575 mono_os_sem_destroy (&prof->writer_queue_sem);
3578 * Free all writer queue entries, and ensure that all sample hits will be
3579 * added to the sample reuse queue.
3581 mono_thread_hazardous_try_free_all ();
3583 cleanup_reusable_samples ();
3586 * Finally, make sure that all sample hits are freed. This should cover all
3587 * hazardous data from the profiler. We can now be sure that the runtime
3588 * won't later invoke free functions in the profiler library after it has
3589 * been unloaded.
3591 mono_thread_hazardous_try_free_all ();
3593 gint32 state = InterlockedRead (&log_profiler.buffer_lock_state);
3595 g_assert (!(state & 0xFFFF) && "Why is the reader count still non-zero?");
3596 g_assert (!(state >> 16) && "Why is the exclusive lock still held?");
3598 #if defined (HAVE_SYS_ZLIB)
3599 if (prof->gzfile)
3600 gzclose (prof->gzfile);
3601 #endif
3602 if (prof->pipe_output)
3603 pclose (prof->file);
3604 else
3605 fclose (prof->file);
3607 mono_conc_hashtable_destroy (prof->method_table);
3608 mono_os_mutex_destroy (&prof->method_table_mutex);
3610 if (log_config.collect_coverage) {
3611 mono_os_mutex_lock (&log_profiler.coverage_mutex);
3612 mono_conc_hashtable_foreach (log_profiler.coverage_assemblies, unref_coverage_assemblies, NULL);
3613 mono_os_mutex_unlock (&log_profiler.coverage_mutex);
3615 mono_conc_hashtable_destroy (log_profiler.coverage_methods);
3616 mono_conc_hashtable_destroy (log_profiler.coverage_assemblies);
3617 mono_conc_hashtable_destroy (log_profiler.coverage_classes);
3618 mono_conc_hashtable_destroy (log_profiler.coverage_filtered_classes);
3620 mono_conc_hashtable_destroy (log_profiler.coverage_image_to_methods);
3621 mono_conc_hashtable_destroy (log_profiler.coverage_suppressed_assemblies);
3622 mono_os_mutex_destroy (&log_profiler.coverage_mutex);
3625 mono_coop_mutex_destroy (&log_profiler.api_mutex);
3627 PROF_TLS_FREE ();
3629 g_free (prof->args);
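/*
 * Expands %t (UTC timestamp), %p (process id) and %% in the output file
 * name template. For instance, a template like "output-%p-%t.mlpd" might
 * expand to "output-1234-20171005103000.mlpd" (illustrative values).
 */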
3632 static char*
3633 new_filename (const char* filename)
3635 time_t t = time (NULL);
3636 int pid = process_id ();
3637 char pid_buf [16];
3638 char time_buf [16];
3639 char *res, *d;
3640 const char *p;
3641 int count_dates = 0;
3642 int count_pids = 0;
3643 int s_date, s_pid;
3644 struct tm *ts;
3645 for (p = filename; *p; p++) {
3646 if (*p != '%')
3647 continue;
3648 p++;
3649 if (*p == 't')
3650 count_dates++;
3651 else if (*p == 'p')
3652 count_pids++;
3653 else if (*p == 0)
3654 break;
3656 if (!count_dates && !count_pids)
3657 return pstrdup (filename);
3658 snprintf (pid_buf, sizeof (pid_buf), "%d", pid);
3659 ts = gmtime (&t);
3660 snprintf (time_buf, sizeof (time_buf), "%d%02d%02d%02d%02d%02d",
3661 1900 + ts->tm_year, 1 + ts->tm_mon, ts->tm_mday, ts->tm_hour, ts->tm_min, ts->tm_sec);
3662 s_date = strlen (time_buf);
3663 s_pid = strlen (pid_buf);
3664 d = res = (char *) g_malloc (strlen (filename) + s_date * count_dates + s_pid * count_pids);
3665 for (p = filename; *p; p++) {
3666 if (*p != '%') {
3667 *d++ = *p;
3668 continue;
3670 p++;
3671 if (*p == 't') {
3672 strcpy (d, time_buf);
3673 d += s_date;
3674 continue;
3675 } else if (*p == 'p') {
3676 strcpy (d, pid_buf);
3677 d += s_pid;
3678 continue;
3679 } else if (*p == '%') {
3680 *d++ = '%';
3681 continue;
3682 } else if (*p == 0)
3683 break;
3684 *d++ = '%';
3685 *d++ = *p;
3687 *d = 0;
3688 return res;
3691 static void
3692 add_to_fd_set (fd_set *set, int fd, int *max_fd)
3695 * This should only trigger for the basic FDs (server socket, pipes) at
3696 * startup if for some mysterious reason they're too large. In this case,
3697 * the profiler really can't function, and we're better off printing an
3698 * error and exiting.
3700 if (fd >= FD_SETSIZE) {
3701 mono_profiler_printf_err ("File descriptor is out of bounds for fd_set: %d", fd);
3702 exit (1);
3705 FD_SET (fd, set);
3707 if (*max_fd < fd)
3708 *max_fd = fd;
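/*
 * The helper thread multiplexes the shutdown pipe, the TCP command socket
 * and any accepted command connections with select (), and also drives
 * periodic counter sampling and sync points on each wakeup. The only
 * command currently understood is "heapshot\n" (in on-demand mode); e.g.
 * something like `echo heapshot | nc 127.0.0.1 $PORT` would trigger a heap
 * shot, where $PORT stands for the runtime-assigned command port.
 */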
3711 static void *
3712 helper_thread (void *arg)
3714 mono_threads_attach_tools_thread ();
3715 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler helper");
3717 MonoProfilerThread *thread = init_thread (FALSE);
3719 GArray *command_sockets = g_array_new (FALSE, FALSE, sizeof (int));
3721 while (1) {
3722 fd_set rfds;
3723 int max_fd = -1;
3725 FD_ZERO (&rfds);
3727 add_to_fd_set (&rfds, log_profiler.server_socket, &max_fd);
3728 add_to_fd_set (&rfds, log_profiler.pipes [0], &max_fd);
3730 for (gint i = 0; i < command_sockets->len; i++)
3731 add_to_fd_set (&rfds, g_array_index (command_sockets, int, i), &max_fd);
3733 struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
3735 // Sleep for 1sec or until a file descriptor has data.
3736 if (select (max_fd + 1, &rfds, NULL, NULL, &tv) == -1) {
3737 if (errno == EINTR)
3738 continue;
3740 mono_profiler_printf_err ("Could not poll in log profiler helper thread: %s", g_strerror (errno));
3741 exit (1);
3744 if (ENABLED (PROFLOG_COUNTER_EVENTS))
3745 counters_and_perfcounters_sample ();
3747 buffer_lock_excl ();
3749 sync_point (SYNC_POINT_PERIODIC);
3751 buffer_unlock_excl ();
3753 // Are we shutting down?
3754 if (FD_ISSET (log_profiler.pipes [0], &rfds)) {
3755 char c;
3756 read (log_profiler.pipes [0], &c, 1);
3757 break;
3760 for (gint i = 0; i < command_sockets->len; i++) {
3761 int fd = g_array_index (command_sockets, int, i);
3763 if (!FD_ISSET (fd, &rfds))
3764 continue;
3766 char buf [64];
3767 int len = read (fd, buf, sizeof (buf) - 1);
3769 if (len == -1)
3770 continue;
3772 if (!len) {
3773 // The other end disconnected.
3774 g_array_remove_index (command_sockets, i);
3775 close (fd);
3777 continue;
3780 buf [len] = 0;
3782 if (log_config.hs_mode == MONO_PROFILER_HEAPSHOT_ON_DEMAND && !strcmp (buf, "heapshot\n")) {
3783 // Rely on the finalization callback triggering a GC.
3784 InterlockedWrite (&log_profiler.heapshot_requested, 1);
3785 mono_gc_finalize_notify ();
3789 if (FD_ISSET (log_profiler.server_socket, &rfds)) {
3790 int fd = accept (log_profiler.server_socket, NULL, NULL);
3792 if (fd != -1) {
3793 if (fd >= FD_SETSIZE)
3794 close (fd);
3795 else
3796 g_array_append_val (command_sockets, fd);
3801 for (gint i = 0; i < command_sockets->len; i++)
3802 close (g_array_index (command_sockets, int, i));
3804 g_array_free (command_sockets, TRUE);
3806 send_log_unsafe (FALSE);
3807 deinit_thread (thread);
3809 mono_thread_info_detach ();
3811 return NULL;
3814 static void
3815 start_helper_thread (void)
3817 if (pipe (log_profiler.pipes) == -1) {
3818 mono_profiler_printf_err ("Could not create log profiler pipe: %s", g_strerror (errno));
3819 exit (1);
3822 log_profiler.server_socket = socket (PF_INET, SOCK_STREAM, 0);
3824 if (log_profiler.server_socket == -1) {
3825 mono_profiler_printf_err ("Could not create log profiler server socket: %s", g_strerror (errno));
3826 exit (1);
3829 struct sockaddr_in server_address;
3831 memset (&server_address, 0, sizeof (server_address));
3832 server_address.sin_family = AF_INET;
3833 server_address.sin_addr.s_addr = INADDR_ANY;
3834 server_address.sin_port = htons (log_profiler.command_port);
3836 if (bind (log_profiler.server_socket, (struct sockaddr *) &server_address, sizeof (server_address)) == -1) {
3837 mono_profiler_printf_err ("Could not bind log profiler server socket on port %d: %s", log_profiler.command_port, g_strerror (errno));
3838 close (log_profiler.server_socket);
3839 exit (1);
3842 if (listen (log_profiler.server_socket, 1) == -1) {
3843 mono_profiler_printf_err ("Could not listen on log profiler server socket: %s", g_strerror (errno));
3844 close (log_profiler.server_socket);
3845 exit (1);
3848 socklen_t slen = sizeof (server_address);
3850 if (getsockname (log_profiler.server_socket, (struct sockaddr *) &server_address, &slen)) {
3851 mono_profiler_printf_err ("Could not retrieve assigned port for log profiler server socket: %s", g_strerror (errno));
3852 close (log_profiler.server_socket);
3853 exit (1);
3856 log_profiler.command_port = ntohs (server_address.sin_port);
3858 if (!mono_native_thread_create (&log_profiler.helper_thread, helper_thread, NULL)) {
3859 mono_profiler_printf_err ("Could not start log profiler helper thread");
3860 close (log_profiler.server_socket);
3861 exit (1);
3865 static void
3866 free_writer_entry (gpointer p)
3868 mono_lock_free_free (p, WRITER_ENTRY_BLOCK_SIZE);
3871 static gboolean
3872 handle_writer_queue_entry (void)
3874 WriterQueueEntry *entry;
3876 if ((entry = (WriterQueueEntry *) mono_lock_free_queue_dequeue (&log_profiler.writer_queue))) {
3877 if (!entry->methods)
3878 goto no_methods;
3880 gboolean wrote_methods = FALSE;
3883 * Encode the method events in a temporary log buffer that we
3884 * flush to disk before the main buffer, ensuring that all
3885 * methods have metadata emitted before they're referenced.
3887 * We use a 'proper' thread-local buffer for this as opposed
3888 * to allocating and freeing a buffer by hand because the call
3889 * to mono_method_full_name () below may trigger class load
3890 * events when it retrieves the signature of the method. So a
3891 * thread-local buffer needs to exist when such events occur.
3893 for (guint i = 0; i < entry->methods->len; i++) {
3894 MethodInfo *info = (MethodInfo *) g_ptr_array_index (entry->methods, i);
3896 if (mono_conc_hashtable_lookup (log_profiler.method_table, info->method))
3897 goto free_info; // This method already has metadata emitted.
3900 * Other threads use this hash table to get a general
3901 * idea of whether a method has already been emitted to
3902 * the stream. Due to the way we add to this table, it
3903 * can easily happen that multiple threads queue up the
3904 * same methods, but that's OK since eventually all
3905 * methods will be in this table and the thread-local
3906 * method lists will just be empty for the rest of the
3907 * app's lifetime.
3909 mono_os_mutex_lock (&log_profiler.method_table_mutex);
3910 mono_conc_hashtable_insert (log_profiler.method_table, info->method, info->method);
3911 mono_os_mutex_unlock (&log_profiler.method_table_mutex);
3913 char *name = mono_method_full_name (info->method, 1);
3914 int nlen = strlen (name) + 1;
3915 void *cstart = info->ji ? mono_jit_info_get_code_start (info->ji) : NULL;
3916 int csize = info->ji ? mono_jit_info_get_code_size (info->ji) : 0;
3918 ENTER_LOG (&method_jits_ctr, logbuffer,
3919 EVENT_SIZE /* event */ +
3920 LEB128_SIZE /* method */ +
3921 LEB128_SIZE /* start */ +
3922 LEB128_SIZE /* size */ +
3923 nlen /* name */
3926 emit_event_time (logbuffer, TYPE_JIT | TYPE_METHOD, info->time);
3927 emit_method_inner (logbuffer, info->method);
3928 emit_ptr (logbuffer, cstart);
3929 emit_value (logbuffer, csize);
3931 memcpy (logbuffer->cursor, name, nlen);
3932 logbuffer->cursor += nlen;
3934 EXIT_LOG_EXPLICIT (NO_SEND);
3936 mono_free (name);
3938 wrote_methods = TRUE;
3940 free_info:
3941 g_free (info);
3944 g_ptr_array_free (entry->methods, TRUE);
3946 if (wrote_methods) {
3947 MonoProfilerThread *thread = PROF_TLS_GET ();
3949 dump_buffer_threadless (thread->buffer);
3950 init_buffer_state (thread);
3953 no_methods:
3954 dump_buffer (entry->buffer);
3956 mono_thread_hazardous_try_free (entry, free_writer_entry);
3958 return TRUE;
3961 return FALSE;
3964 static void *
3965 writer_thread (void *arg)
3967 mono_threads_attach_tools_thread ();
3968 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler writer");
3970 dump_header ();
3972 MonoProfilerThread *thread = init_thread (FALSE);
3974 while (InterlockedRead (&log_profiler.run_writer_thread)) {
3975 mono_os_sem_wait (&log_profiler.writer_queue_sem, MONO_SEM_FLAGS_NONE);
3976 handle_writer_queue_entry ();
3979 /* Drain any remaining entries on shutdown. */
3980 while (handle_writer_queue_entry ());
3982 free_buffer (thread->buffer, thread->buffer->size);
3983 deinit_thread (thread);
3985 mono_thread_info_detach ();
3987 return NULL;
3990 static void
3991 start_writer_thread (void)
3993 InterlockedWrite (&log_profiler.run_writer_thread, 1);
3995 if (!mono_native_thread_create (&log_profiler.writer_thread, writer_thread, NULL)) {
3996 mono_profiler_printf_err ("Could not start log profiler writer thread");
3997 exit (1);
4001 static void
4002 reuse_sample_hit (gpointer p)
4004 SampleHit *sample = p;
4006 mono_lock_free_queue_node_unpoison (&sample->node);
4007 mono_lock_free_queue_enqueue (&log_profiler.sample_reuse_queue, &sample->node);
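/*
 * Frames captured in signal context may carry only a native address plus
 * the domain; for those, the managed method is resolved here, outside
 * signal context, via the JIT info table before the hit is serialized.
 */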
4010 static gboolean
4011 handle_dumper_queue_entry (void)
4013 SampleHit *sample;
4015 if ((sample = (SampleHit *) mono_lock_free_queue_dequeue (&log_profiler.dumper_queue))) {
4016 for (int i = 0; i < sample->count; ++i) {
4017 MonoMethod *method = sample->frames [i].method;
4018 MonoDomain *domain = sample->frames [i].domain;
4019 void *address = sample->frames [i].base_address;
4021 if (!method) {
4022 g_assert (domain && "What happened to the domain pointer?");
4023 g_assert (address && "What happened to the instruction pointer?");
4025 MonoJitInfo *ji = mono_jit_info_table_find (domain, (char *) address);
4027 if (ji)
4028 sample->frames [i].method = mono_jit_info_get_method (ji);
4032 ENTER_LOG (&sample_hits_ctr, logbuffer,
4033 EVENT_SIZE /* event */ +
4034 LEB128_SIZE /* tid */ +
4035 LEB128_SIZE /* count */ +
4036 1 * (
4037 LEB128_SIZE /* ip */
4039 LEB128_SIZE /* managed count */ +
4040 sample->count * (
4041 LEB128_SIZE /* method */
4045 emit_event_time (logbuffer, TYPE_SAMPLE | TYPE_SAMPLE_HIT, sample->time);
4046 emit_ptr (logbuffer, (void *) sample->tid);
4047 emit_value (logbuffer, 1);
4049 // TODO: Actual native unwinding.
4050 for (int i = 0; i < 1; ++i) {
4051 emit_ptr (logbuffer, sample->ip);
4052 add_code_pointer ((uintptr_t) sample->ip);
4055 /* new in data version 6 */
4056 emit_uvalue (logbuffer, sample->count);
4058 for (int i = 0; i < sample->count; ++i)
4059 emit_method (logbuffer, sample->frames [i].method);
4061 EXIT_LOG;
4063 mono_thread_hazardous_try_free (sample, reuse_sample_hit);
4065 dump_unmanaged_coderefs ();
4068 return FALSE;
4071 static void *
4072 dumper_thread (void *arg)
4074 mono_threads_attach_tools_thread ();
4075 mono_native_thread_set_name (mono_native_thread_id_get (), "Profiler dumper");
4077 MonoProfilerThread *thread = init_thread (FALSE);
4079 while (InterlockedRead (&log_profiler.run_dumper_thread)) {
4081 * Flush samples every second so it doesn't seem like the profiler is
4082 * not working if the program is mostly idle.
4084 if (mono_os_sem_timedwait (&log_profiler.dumper_queue_sem, 1000, MONO_SEM_FLAGS_NONE) == MONO_SEM_TIMEDWAIT_RET_TIMEDOUT)
4085 send_log_unsafe (FALSE);
4087 handle_dumper_queue_entry ();
4090 /* Drain any remaining entries on shutdown. */
4091 while (handle_dumper_queue_entry ());
4093 send_log_unsafe (FALSE);
4094 deinit_thread (thread);
4096 mono_thread_info_detach ();
4098 return NULL;
4101 static void
4102 start_dumper_thread (void)
4104 InterlockedWrite (&log_profiler.run_dumper_thread, 1);
4106 if (!mono_native_thread_create (&log_profiler.dumper_thread, dumper_thread, NULL)) {
4107 mono_profiler_printf_err ("Could not start log profiler dumper thread");
4108 exit (1);
4112 static void
4113 register_counter (const char *name, gint32 *counter)
4115 mono_counters_register (name, MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, counter);
4118 ICALL_EXPORT gint32
4119 proflog_icall_GetMaxStackTraceFrames (void)
4121 return MAX_FRAMES;
4124 ICALL_EXPORT gint32
4125 proflog_icall_GetStackTraceFrames (void)
4127 return log_config.num_frames;
4130 ICALL_EXPORT void
4131 proflog_icall_SetStackTraceFrames (gint32 value)
4133 log_config.num_frames = value;
4136 ICALL_EXPORT MonoProfilerHeapshotMode
4137 proflog_icall_GetHeapshotMode (void)
4139 return log_config.hs_mode;
4142 ICALL_EXPORT void
4143 proflog_icall_SetHeapshotMode (MonoProfilerHeapshotMode value)
4145 log_config.hs_mode = value;
4148 ICALL_EXPORT gint32
4149 proflog_icall_GetHeapshotMillisecondsFrequency (void)
4151 return log_config.hs_freq_ms;
4154 ICALL_EXPORT void
4155 proflog_icall_SetHeapshotMillisecondsFrequency (gint32 value)
4157 log_config.hs_freq_ms = value;
4160 ICALL_EXPORT gint32
4161 proflog_icall_GetHeapshotCollectionsFrequency (void)
4163 return log_config.hs_freq_gc;
4166 ICALL_EXPORT void
4167 proflog_icall_SetHeapshotCollectionsFrequency (gint32 value)
4169 log_config.hs_freq_gc = value;
4172 ICALL_EXPORT gint32
4173 proflog_icall_GetCallDepth (void)
4175 return log_config.max_call_depth;
4178 ICALL_EXPORT void
4179 proflog_icall_SetCallDepth (gint32 value)
4181 log_config.max_call_depth = value;
4184 ICALL_EXPORT void
4185 proflog_icall_GetSampleMode (MonoProfilerSampleMode *mode, gint32 *frequency)
4187 uint32_t freq;
4189 mono_profiler_get_sample_mode (log_profiler.handle, mode, &freq);
4191 *frequency = freq;
4194 ICALL_EXPORT MonoBoolean
4195 proflog_icall_SetSampleMode (MonoProfilerSampleMode mode, gint32 frequency)
4196 {
4197 mono_coop_mutex_lock (&log_profiler.api_mutex);
4199 mono_bool result = mono_profiler_set_sample_mode (log_profiler.handle, mode, frequency);
4201 if (mode != MONO_PROFILER_SAMPLE_MODE_NONE) {
4202 ENABLE (PROFLOG_SAMPLE_EVENTS);
4203 mono_profiler_set_sample_hit_callback (log_profiler.handle, mono_sample_hit);
4204 } else {
4205 DISABLE (PROFLOG_SAMPLE_EVENTS);
4206 mono_profiler_set_sample_hit_callback (log_profiler.handle, NULL);
4207 }
4209 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4211 return result;
4212 }
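/*
 * The Set* icalls below all follow the same shape: take api_mutex, flip the
 * event bit with ENABLE/DISABLE, and install or remove the matching callback
 * so that disabled events cost nothing. As a minimal sketch, with a
 * hypothetical PROFLOG_FOO_EVENTS bit and foo_event callback that are not
 * part of this file:
 *
 *     mono_coop_mutex_lock (&log_profiler.api_mutex);
 *
 *     if (value) {
 *         ENABLE (PROFLOG_FOO_EVENTS);
 *         mono_profiler_set_foo_callback (log_profiler.handle, foo_event);
 *     } else {
 *         DISABLE (PROFLOG_FOO_EVENTS);
 *         mono_profiler_set_foo_callback (log_profiler.handle, NULL);
 *     }
 *
 *     mono_coop_mutex_unlock (&log_profiler.api_mutex);
 */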
4214 ICALL_EXPORT MonoBoolean
4215 proflog_icall_GetExceptionEvents (void)
4216 {
4217 return ENABLED (PROFLOG_EXCEPTION_EVENTS);
4218 }
4220 ICALL_EXPORT void
4221 proflog_icall_SetExceptionEvents (MonoBoolean value)
4222 {
4223 mono_coop_mutex_lock (&log_profiler.api_mutex);
4225 if (value) {
4226 ENABLE (PROFLOG_EXCEPTION_EVENTS);
4227 mono_profiler_set_exception_throw_callback (log_profiler.handle, throw_exc);
4228 mono_profiler_set_exception_clause_callback (log_profiler.handle, clause_exc);
4229 } else {
4230 DISABLE (PROFLOG_EXCEPTION_EVENTS);
4231 mono_profiler_set_exception_throw_callback (log_profiler.handle, NULL);
4232 mono_profiler_set_exception_clause_callback (log_profiler.handle, NULL);
4233 }
4235 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4236 }
4238 ICALL_EXPORT MonoBoolean
4239 proflog_icall_GetMonitorEvents (void)
4240 {
4241 return ENABLED (PROFLOG_MONITOR_EVENTS);
4242 }
4244 ICALL_EXPORT void
4245 proflog_icall_SetMonitorEvents (MonoBoolean value)
4246 {
4247 mono_coop_mutex_lock (&log_profiler.api_mutex);
4249 if (value) {
4250 ENABLE (PROFLOG_MONITOR_EVENTS);
4251 mono_profiler_set_monitor_contention_callback (log_profiler.handle, monitor_contention);
4252 mono_profiler_set_monitor_acquired_callback (log_profiler.handle, monitor_acquired);
4253 mono_profiler_set_monitor_failed_callback (log_profiler.handle, monitor_failed);
4254 } else {
4255 DISABLE (PROFLOG_MONITOR_EVENTS);
4256 mono_profiler_set_monitor_contention_callback (log_profiler.handle, NULL);
4257 mono_profiler_set_monitor_acquired_callback (log_profiler.handle, NULL);
4258 mono_profiler_set_monitor_failed_callback (log_profiler.handle, NULL);
4259 }
4261 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4262 }
4264 ICALL_EXPORT MonoBoolean
4265 proflog_icall_GetGCEvents (void)
4266 {
4267 return ENABLED (PROFLOG_GC_EVENTS);
4268 }
4270 ICALL_EXPORT void
4271 proflog_icall_SetGCEvents (MonoBoolean value)
4272 {
4273 mono_coop_mutex_lock (&log_profiler.api_mutex);
4275 if (value)
4276 ENABLE (PROFLOG_GC_EVENTS);
4277 else
4278 DISABLE (PROFLOG_GC_EVENTS);
4280 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4281 }
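/*
 * Unlike most of the setters here, this one only flips the event bit: the
 * gc_event callback is registered unconditionally in mono_profiler_init_log
 * (it is one of the required callbacks there), so toggling the bit is enough
 * to start or stop emitting these events.
 */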
4283 ICALL_EXPORT MonoBoolean
4284 proflog_icall_GetGCAllocationEvents (void)
4285 {
4286 return ENABLED (PROFLOG_GC_ALLOCATION_EVENTS);
4287 }
4289 ICALL_EXPORT void
4290 proflog_icall_SetGCAllocationEvents (MonoBoolean value)
4291 {
4292 mono_coop_mutex_lock (&log_profiler.api_mutex);
4294 if (value) {
4295 ENABLE (PROFLOG_GC_ALLOCATION_EVENTS);
4296 mono_profiler_set_gc_allocation_callback (log_profiler.handle, gc_alloc);
4297 } else {
4298 DISABLE (PROFLOG_GC_ALLOCATION_EVENTS);
4299 mono_profiler_set_gc_allocation_callback (log_profiler.handle, NULL);
4300 }
4302 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4303 }
4305 ICALL_EXPORT MonoBoolean
4306 proflog_icall_GetGCMoveEvents (void)
4307 {
4308 return ENABLED (PROFLOG_GC_MOVE_EVENTS);
4309 }
4311 ICALL_EXPORT void
4312 proflog_icall_SetGCMoveEvents (MonoBoolean value)
4313 {
4314 mono_coop_mutex_lock (&log_profiler.api_mutex);
4316 if (value) {
4317 ENABLE (PROFLOG_GC_MOVE_EVENTS);
4318 mono_profiler_set_gc_moves_callback (log_profiler.handle, gc_moves);
4319 } else {
4320 DISABLE (PROFLOG_GC_MOVE_EVENTS);
4321 mono_profiler_set_gc_moves_callback (log_profiler.handle, NULL);
4322 }
4324 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4325 }
4327 ICALL_EXPORT MonoBoolean
4328 proflog_icall_GetGCRootEvents (void)
4329 {
4330 return ENABLED (PROFLOG_GC_ROOT_EVENTS);
4331 }
4333 ICALL_EXPORT void
4334 proflog_icall_SetGCRootEvents (MonoBoolean value)
4335 {
4336 mono_coop_mutex_lock (&log_profiler.api_mutex);
4338 if (value)
4339 ENABLE (PROFLOG_GC_ROOT_EVENTS);
4340 else
4341 DISABLE (PROFLOG_GC_ROOT_EVENTS);
4343 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4344 }
4346 ICALL_EXPORT MonoBoolean
4347 proflog_icall_GetGCHandleEvents (void)
4348 {
4349 return ENABLED (PROFLOG_GC_HANDLE_EVENTS);
4350 }
4352 ICALL_EXPORT void
4353 proflog_icall_SetGCHandleEvents (MonoBoolean value)
4354 {
4355 mono_coop_mutex_lock (&log_profiler.api_mutex);
4357 if (value) {
4358 ENABLE (PROFLOG_GC_HANDLE_EVENTS);
4359 mono_profiler_set_gc_handle_created_callback (log_profiler.handle, gc_handle_created);
4360 mono_profiler_set_gc_handle_deleted_callback (log_profiler.handle, gc_handle_deleted);
4361 } else {
4362 DISABLE (PROFLOG_GC_HANDLE_EVENTS);
4363 mono_profiler_set_gc_handle_created_callback (log_profiler.handle, NULL);
4364 mono_profiler_set_gc_handle_deleted_callback (log_profiler.handle, NULL);
4365 }
4367 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4368 }
4370 ICALL_EXPORT MonoBoolean
4371 proflog_icall_GetGCFinalizationEvents (void)
4372 {
4373 return ENABLED (PROFLOG_GC_FINALIZATION_EVENTS);
4374 }
4376 ICALL_EXPORT void
4377 proflog_icall_SetGCFinalizationEvents (MonoBoolean value)
4378 {
4379 mono_coop_mutex_lock (&log_profiler.api_mutex);
4381 if (value) {
4382 ENABLE (PROFLOG_GC_FINALIZATION_EVENTS);
4383 mono_profiler_set_gc_finalizing_callback (log_profiler.handle, finalize_begin);
4384 mono_profiler_set_gc_finalizing_object_callback (log_profiler.handle, finalize_object_begin);
4385 mono_profiler_set_gc_finalized_object_callback (log_profiler.handle, finalize_object_end);
4386 } else {
4387 DISABLE (PROFLOG_GC_FINALIZATION_EVENTS);
4388 mono_profiler_set_gc_finalizing_callback (log_profiler.handle, NULL);
4389 mono_profiler_set_gc_finalizing_object_callback (log_profiler.handle, NULL);
4390 mono_profiler_set_gc_finalized_object_callback (log_profiler.handle, NULL);
4391 }
4393 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4394 }
4396 ICALL_EXPORT MonoBoolean
4397 proflog_icall_GetCounterEvents (void)
4398 {
4399 return ENABLED (PROFLOG_COUNTER_EVENTS);
4400 }
4402 ICALL_EXPORT void
4403 proflog_icall_SetCounterEvents (MonoBoolean value)
4404 {
4405 mono_coop_mutex_lock (&log_profiler.api_mutex);
4407 if (value)
4408 ENABLE (PROFLOG_COUNTER_EVENTS);
4409 else
4410 DISABLE (PROFLOG_COUNTER_EVENTS);
4412 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4413 }
4415 ICALL_EXPORT MonoBoolean
4416 proflog_icall_GetJitEvents (void)
4417 {
4418 return ENABLED (PROFLOG_JIT_EVENTS);
4419 }
4421 ICALL_EXPORT void
4422 proflog_icall_SetJitEvents (MonoBoolean value)
4423 {
4424 mono_coop_mutex_lock (&log_profiler.api_mutex);
4426 if (value) {
4427 ENABLE (PROFLOG_JIT_EVENTS);
4428 mono_profiler_set_jit_code_buffer_callback (log_profiler.handle, code_buffer_new);
4429 } else {
4430 DISABLE (PROFLOG_JIT_EVENTS);
4431 mono_profiler_set_jit_code_buffer_callback (log_profiler.handle, NULL);
4432 }
4434 mono_coop_mutex_unlock (&log_profiler.api_mutex);
4435 }
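/*
 * All of the icalls above are registered with the runtime in
 * runtime_initialized () below, under the managed name prefix
 * "Mono.Profiler.Log.LogProfiler::", so they only become callable from
 * managed code once the runtime has finished initializing.
 */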
4437 static void
4438 runtime_initialized (MonoProfiler *profiler)
4439 {
4440 InterlockedWrite (&log_profiler.runtime_inited, 1);
4442 register_counter ("Sample events allocated", &sample_allocations_ctr);
4443 register_counter ("Log buffers allocated", &buffer_allocations_ctr);
4445 register_counter ("Event: Sync points", &sync_points_ctr);
4446 register_counter ("Event: Heap objects", &heap_objects_ctr);
4447 register_counter ("Event: Heap starts", &heap_starts_ctr);
4448 register_counter ("Event: Heap ends", &heap_ends_ctr);
4449 register_counter ("Event: Heap roots", &heap_roots_ctr);
4450 register_counter ("Event: GC events", &gc_events_ctr);
4451 register_counter ("Event: GC resizes", &gc_resizes_ctr);
4452 register_counter ("Event: GC allocations", &gc_allocs_ctr);
4453 register_counter ("Event: GC moves", &gc_moves_ctr);
4454 register_counter ("Event: GC handle creations", &gc_handle_creations_ctr);
4455 register_counter ("Event: GC handle deletions", &gc_handle_deletions_ctr);
4456 register_counter ("Event: GC finalize starts", &finalize_begins_ctr);
4457 register_counter ("Event: GC finalize ends", &finalize_ends_ctr);
4458 register_counter ("Event: GC finalize object starts", &finalize_object_begins_ctr);
4459 register_counter ("Event: GC finalize object ends", &finalize_object_ends_ctr);
4460 register_counter ("Event: Image loads", &image_loads_ctr);
4461 register_counter ("Event: Image unloads", &image_unloads_ctr);
4462 register_counter ("Event: Assembly loads", &assembly_loads_ctr);
4463 register_counter ("Event: Assembly unloads", &assembly_unloads_ctr);
4464 register_counter ("Event: Class loads", &class_loads_ctr);
4465 register_counter ("Event: Class unloads", &class_unloads_ctr);
4466 register_counter ("Event: Method entries", &method_entries_ctr);
4467 register_counter ("Event: Method exits", &method_exits_ctr);
4468 register_counter ("Event: Method exception leaves", &method_exception_exits_ctr);
4469 register_counter ("Event: Method JITs", &method_jits_ctr);
4470 register_counter ("Event: Code buffers", &code_buffers_ctr);
4471 register_counter ("Event: Exception throws", &exception_throws_ctr);
4472 register_counter ("Event: Exception clauses", &exception_clauses_ctr);
4473 register_counter ("Event: Monitor events", &monitor_events_ctr);
4474 register_counter ("Event: Thread starts", &thread_starts_ctr);
4475 register_counter ("Event: Thread ends", &thread_ends_ctr);
4476 register_counter ("Event: Thread names", &thread_names_ctr);
4477 register_counter ("Event: Domain loads", &domain_loads_ctr);
4478 register_counter ("Event: Domain unloads", &domain_unloads_ctr);
4479 register_counter ("Event: Domain names", &domain_names_ctr);
4480 register_counter ("Event: Context loads", &context_loads_ctr);
4481 register_counter ("Event: Context unloads", &context_unloads_ctr);
4482 register_counter ("Event: Sample binaries", &sample_ubins_ctr);
4483 register_counter ("Event: Sample symbols", &sample_usyms_ctr);
4484 register_counter ("Event: Sample hits", &sample_hits_ctr);
4485 register_counter ("Event: Counter descriptors", &counter_descriptors_ctr);
4486 register_counter ("Event: Counter samples", &counter_samples_ctr);
4487 register_counter ("Event: Performance counter descriptors", &perfcounter_descriptors_ctr);
4488 register_counter ("Event: Performance counter samples", &perfcounter_samples_ctr);
4489 register_counter ("Event: Coverage methods", &coverage_methods_ctr);
4490 register_counter ("Event: Coverage statements", &coverage_statements_ctr);
4491 register_counter ("Event: Coverage classes", &coverage_classes_ctr);
4492 register_counter ("Event: Coverage assemblies", &coverage_assemblies_ctr);
4494 counters_init ();
4496 /*
4497 * We must start the helper thread before the writer thread. This is
4498 * because the helper thread sets up the command port which is written to
4499 * the log header by the writer thread.
4500 */
4501 start_helper_thread ();
4502 start_writer_thread ();
4503 start_dumper_thread ();
4505 mono_coop_mutex_init (&log_profiler.api_mutex);
4507 #define ADD_ICALL(NAME) \
4508 mono_add_internal_call ("Mono.Profiler.Log.LogProfiler::" EGLIB_STRINGIFY (NAME), proflog_icall_ ## NAME);
4510 ADD_ICALL (GetMaxStackTraceFrames);
4511 ADD_ICALL (GetStackTraceFrames);
4512 ADD_ICALL (SetStackTraceFrames);
4513 ADD_ICALL (GetHeapshotMode);
4514 ADD_ICALL (SetHeapshotMode);
4515 ADD_ICALL (GetHeapshotMillisecondsFrequency);
4516 ADD_ICALL (SetHeapshotMillisecondsFrequency);
4517 ADD_ICALL (GetHeapshotCollectionsFrequency);
4518 ADD_ICALL (SetHeapshotCollectionsFrequency);
4519 ADD_ICALL (GetCallDepth);
4520 ADD_ICALL (SetCallDepth);
4521 ADD_ICALL (GetSampleMode);
4522 ADD_ICALL (SetSampleMode);
4523 ADD_ICALL (GetExceptionEvents);
4524 ADD_ICALL (SetExceptionEvents);
4525 ADD_ICALL (GetMonitorEvents);
4526 ADD_ICALL (SetMonitorEvents);
4527 ADD_ICALL (GetGCEvents);
4528 ADD_ICALL (SetGCEvents);
4529 ADD_ICALL (GetGCAllocationEvents);
4530 ADD_ICALL (SetGCAllocationEvents);
4531 ADD_ICALL (GetGCMoveEvents);
4532 ADD_ICALL (SetGCMoveEvents);
4533 ADD_ICALL (GetGCRootEvents);
4534 ADD_ICALL (SetGCRootEvents);
4535 ADD_ICALL (GetGCHandleEvents);
4536 ADD_ICALL (SetGCHandleEvents);
4537 ADD_ICALL (GetGCFinalizationEvents);
4538 ADD_ICALL (SetGCFinalizationEvents);
4539 ADD_ICALL (GetCounterEvents);
4540 ADD_ICALL (SetCounterEvents);
4541 ADD_ICALL (GetJitEvents);
4542 ADD_ICALL (SetJitEvents);
4544 #undef ADD_ICALL
4545 }
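/*
 * For reference, each ADD_ICALL line expands to a plain internal call
 * registration; ADD_ICALL (GetCallDepth), for example, becomes:
 *
 *     mono_add_internal_call ("Mono.Profiler.Log.LogProfiler::GetCallDepth", proflog_icall_GetCallDepth);
 *
 * The managed side is expected to declare matching extern methods marked
 * [MethodImpl (MethodImplOptions.InternalCall)] on that class.
 */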
4547 static void
4548 create_profiler (const char *args, const char *filename, GPtrArray *filters)
4549 {
4550 char *nf;
4552 log_profiler.args = pstrdup (args);
4553 log_profiler.command_port = log_config.command_port;
4555 // If the filename begins with '+', append the pid at the end.
4556 if (filename && *filename == '+')
4557 filename = g_strdup_printf ("%s.%d", filename + 1, getpid ());
4559 if (!filename) {
4560 if (log_config.do_report)
4561 filename = "|mprof-report -";
4562 else
4563 filename = "output.mlpd";
4564 nf = (char*)filename;
4565 } else {
4566 nf = new_filename (filename);
4567 if (log_config.do_report) {
4568 int s = strlen (nf) + 32;
4569 char *p = (char *) g_malloc (s);
4570 snprintf (p, s, "|mprof-report '--out=%s' -", nf);
4571 g_free (nf);
4572 nf = p;
4573 }
4574 }
4575 if (*nf == '|') {
4576 log_profiler.file = popen (nf + 1, "w");
4577 log_profiler.pipe_output = 1;
4578 } else if (*nf == '#') {
4579 int fd = strtol (nf + 1, NULL, 10);
4580 log_profiler.file = fdopen (fd, "a");
4581 } else
4582 log_profiler.file = fopen (nf, "wb");
4584 if (!log_profiler.file) {
4585 mono_profiler_printf_err ("Could not create log profiler output file '%s'.", nf);
4586 exit (1);
4587 }
4589 #if defined (HAVE_SYS_ZLIB)
4590 if (log_config.use_zip)
4591 log_profiler.gzfile = gzdopen (fileno (log_profiler.file), "wb");
4592 #endif
4594 /*
4595 * If you hit this assert while increasing MAX_FRAMES, you need to increase
4596 * SAMPLE_BLOCK_SIZE as well.
4597 */
4598 g_assert (SAMPLE_SLOT_SIZE (MAX_FRAMES) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (SAMPLE_BLOCK_SIZE));
4600 // FIXME: We should free this stuff too.
4601 mono_lock_free_allocator_init_size_class (&log_profiler.sample_size_class, SAMPLE_SLOT_SIZE (log_config.num_frames), SAMPLE_BLOCK_SIZE);
4602 mono_lock_free_allocator_init_allocator (&log_profiler.sample_allocator, &log_profiler.sample_size_class, MONO_MEM_ACCOUNT_PROFILER);
4604 mono_lock_free_queue_init (&log_profiler.sample_reuse_queue);
4606 g_assert (sizeof (WriterQueueEntry) * 2 < LOCK_FREE_ALLOC_SB_USABLE_SIZE (WRITER_ENTRY_BLOCK_SIZE));
4608 // FIXME: We should free this stuff too.
4609 mono_lock_free_allocator_init_size_class (&log_profiler.writer_entry_size_class, sizeof (WriterQueueEntry), WRITER_ENTRY_BLOCK_SIZE);
4610 mono_lock_free_allocator_init_allocator (&log_profiler.writer_entry_allocator, &log_profiler.writer_entry_size_class, MONO_MEM_ACCOUNT_PROFILER);
4612 mono_lock_free_queue_init (&log_profiler.writer_queue);
4613 mono_os_sem_init (&log_profiler.writer_queue_sem, 0);
4615 mono_lock_free_queue_init (&log_profiler.dumper_queue);
4616 mono_os_sem_init (&log_profiler.dumper_queue_sem, 0);
4618 mono_os_mutex_init (&log_profiler.method_table_mutex);
4619 log_profiler.method_table = mono_conc_hashtable_new (NULL, NULL);
4621 if (log_config.collect_coverage)
4622 coverage_init ();
4624 log_profiler.coverage_filters = filters;
4626 log_profiler.startup_time = current_time ();
4627 }
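/*
 * Output-target summary, as parsed above:
 *
 *   "output.mlpd"      plain file, opened with fopen (..., "wb")
 *   "|mprof-report -"  leading '|': pipe to a command via popen
 *   "#3"               leading '#': adopt an existing file descriptor via fdopen
 *   "+output.mlpd"     leading '+': plain file with the pid appended, e.g. "output.mlpd.1234"
 *
 * With the report option and no filename, the data is piped straight into
 * "mprof-report -"; with a filename, it is still piped to mprof-report, which
 * writes its report to that file via --out.
 */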
4629 MONO_API void
4630 mono_profiler_init_log (const char *desc);
4632 void
4633 mono_profiler_init_log (const char *desc)
4634 {
4635 GPtrArray *filters = NULL;
4637 proflog_parse_args (&log_config, desc [3] == ':' ? desc + 4 : "");
4639 if (log_config.cov_filter_files) {
4640 filters = g_ptr_array_new ();
4641 int i;
4642 for (i = 0; i < log_config.cov_filter_files->len; ++i) {
4643 const char *name = log_config.cov_filter_files->pdata [i];
4644 parse_cov_filter_file (filters, name);
4645 }
4646 }
4648 init_time ();
4650 PROF_TLS_INIT ();
4652 create_profiler (desc, log_config.output_filename, filters);
4654 mono_lls_init (&log_profiler.profiler_thread_list, NULL);
4656 MonoProfilerHandle handle = log_profiler.handle = mono_profiler_create (&log_profiler);
4658 /*
4659 * Required callbacks. These are either necessary for the profiler itself
4660 * to function, or provide metadata that's needed if other events (e.g.
4661 * allocations, exceptions) are dynamically enabled/disabled.
4662 */
4664 mono_profiler_set_runtime_shutdown_begin_callback (handle, log_early_shutdown);
4665 mono_profiler_set_runtime_shutdown_end_callback (handle, log_shutdown);
4666 mono_profiler_set_runtime_initialized_callback (handle, runtime_initialized);
4668 mono_profiler_set_gc_event_callback (handle, gc_event);
4670 mono_profiler_set_thread_started_callback (handle, thread_start);
4671 mono_profiler_set_thread_stopped_callback (handle, thread_end);
4672 mono_profiler_set_thread_name_callback (handle, thread_name);
4674 mono_profiler_set_domain_loaded_callback (handle, domain_loaded);
4675 mono_profiler_set_domain_unloading_callback (handle, domain_unloaded);
4676 mono_profiler_set_domain_name_callback (handle, domain_name);
4678 mono_profiler_set_context_loaded_callback (handle, context_loaded);
4679 mono_profiler_set_context_unloaded_callback (handle, context_unloaded);
4681 mono_profiler_set_assembly_loaded_callback (handle, assembly_loaded);
4682 mono_profiler_set_assembly_unloading_callback (handle, assembly_unloaded);
4684 mono_profiler_set_image_loaded_callback (handle, image_loaded);
4685 mono_profiler_set_image_unloading_callback (handle, image_unloaded);
4687 mono_profiler_set_class_loaded_callback (handle, class_loaded);
4689 mono_profiler_set_jit_done_callback (handle, method_jitted);
4691 if (ENABLED (PROFLOG_EXCEPTION_EVENTS)) {
4692 mono_profiler_set_exception_throw_callback (handle, throw_exc);
4693 mono_profiler_set_exception_clause_callback (handle, clause_exc);
4694 }
4696 if (ENABLED (PROFLOG_MONITOR_EVENTS)) {
4697 mono_profiler_set_monitor_contention_callback (handle, monitor_contention);
4698 mono_profiler_set_monitor_acquired_callback (handle, monitor_acquired);
4699 mono_profiler_set_monitor_failed_callback (handle, monitor_failed);
4700 }
4702 if (ENABLED (PROFLOG_GC_EVENTS))
4703 mono_profiler_set_gc_resize_callback (handle, gc_resize);
4705 if (ENABLED (PROFLOG_GC_ALLOCATION_EVENTS))
4706 mono_profiler_set_gc_allocation_callback (handle, gc_alloc);
4708 if (ENABLED (PROFLOG_GC_MOVE_EVENTS))
4709 mono_profiler_set_gc_moves_callback (handle, gc_moves);
4711 if (ENABLED (PROFLOG_GC_ROOT_EVENTS))
4712 mono_profiler_set_gc_roots_callback (handle, gc_roots);
4714 if (ENABLED (PROFLOG_GC_HANDLE_EVENTS)) {
4715 mono_profiler_set_gc_handle_created_callback (handle, gc_handle_created);
4716 mono_profiler_set_gc_handle_deleted_callback (handle, gc_handle_deleted);
4717 }
4719 if (ENABLED (PROFLOG_GC_FINALIZATION_EVENTS)) {
4720 mono_profiler_set_gc_finalizing_callback (handle, finalize_begin);
4721 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4722 mono_profiler_set_gc_finalizing_object_callback (handle, finalize_object_begin);
4723 }
4725 // On-demand heapshots use the finalizer thread to force a collection and thus a heapshot.
4726 mono_profiler_set_gc_finalized_callback (handle, finalize_end);
4728 if (ENABLED (PROFLOG_SAMPLE_EVENTS))
4729 mono_profiler_set_sample_hit_callback (handle, mono_sample_hit);
4731 if (ENABLED (PROFLOG_JIT_EVENTS))
4732 mono_profiler_set_jit_code_buffer_callback (handle, code_buffer_new);
4734 if (log_config.enter_leave) {
4735 mono_profiler_set_call_instrumentation_filter_callback (handle, method_filter);
4736 mono_profiler_set_method_enter_callback (handle, method_enter);
4737 mono_profiler_set_method_leave_callback (handle, method_leave);
4738 mono_profiler_set_method_exception_leave_callback (handle, method_exc_leave);
4739 }
4741 if (log_config.collect_coverage)
4742 mono_profiler_set_coverage_filter_callback (handle, coverage_filter);
4744 mono_profiler_enable_allocations ();
4745 mono_profiler_enable_sampling (handle);
4747 /*
4748 * If the user gave no sample option, this just leaves the sampling thread
4749 * in idle mode. We make the call even then so that we can warn when another
4750 * profiler already controls the sampling parameters.
4751 */
4752 if (!mono_profiler_set_sample_mode (handle, log_config.sampling_mode, log_config.sample_freq))
4753 mono_profiler_printf_err ("Another profiler controls sampling parameters; the log profiler will not be able to modify them.");
4754 }
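/*
 * The log profiler is typically activated on the mono command line, e.g.:
 *
 *     mono --profile=log:sample,output=app.mlpd app.exe
 *
 * This is an illustrative invocation; the option string after "log:" is what
 * desc [3] == ':' detects above, and it is handed to proflog_parse_args,
 * whose recognized option names are defined alongside it.
 */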