Remove unused var and declare variables first to fix msvc build.
[mono/afaerber.git] / mono / profiler / mono-profiler-logging.c
blob 9ed3cb8cde298fd011036322b3b09cf45922b004
1 /*
2 * mono-profiler-logging.c: Logging profiler for Mono.
4 * Author:
5 * Massimiliano Mantione (massi@ximian.com)
7 * Copyright 2008-2009 Novell, Inc (http://www.novell.com)
8 */
9 #include <config.h>
10 #include <mono/metadata/profiler.h>
11 #include <mono/metadata/class.h>
12 #include <mono/metadata/metadata-internals.h>
13 #include <mono/metadata/class-internals.h>
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/loader.h>
16 #include <mono/metadata/threads.h>
17 #include <mono/metadata/debug-helpers.h>
18 #include <mono/metadata/mono-gc.h>
19 #include <mono/io-layer/atomic.h>
20 #include <string.h>
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <ctype.h>
24 #include <glib.h>
26 #include <dlfcn.h>
28 #include <sys/types.h>
29 #include <sys/socket.h>
30 #include <netinet/in.h>
32 #define HAS_OPROFILE 0
34 #if (HAS_OPROFILE)
35 #include <libopagent.h>
36 #endif
38 // Needed for heap analysis
39 extern gboolean mono_object_is_alive (MonoObject* obj);
41 typedef enum {
42 MONO_PROFILER_FILE_BLOCK_KIND_INTRO = 1,
43 MONO_PROFILER_FILE_BLOCK_KIND_END = 2,
44 MONO_PROFILER_FILE_BLOCK_KIND_MAPPING = 3,
45 MONO_PROFILER_FILE_BLOCK_KIND_LOADED = 4,
46 MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED = 5,
47 MONO_PROFILER_FILE_BLOCK_KIND_EVENTS = 6,
48 MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL = 7,
49 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA = 8,
50 MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY = 9,
51 MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES = 10
52 } MonoProfilerFileBlockKind;
54 typedef enum {
55 MONO_PROFILER_DIRECTIVE_END = 0,
56 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER = 1,
57 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_HAVE_STACK = 2,
58 MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_ID = 3,
59 MONO_PROFILER_DIRECTIVE_LOADED_ELEMENTS_CARRY_ID = 4,
60 MONO_PROFILER_DIRECTIVE_CLASSES_CARRY_ASSEMBLY_ID = 5,
61 MONO_PROFILER_DIRECTIVE_METHODS_CARRY_WRAPPER_FLAG = 6,
62 MONO_PROFILER_DIRECTIVE_LAST
63 } MonoProfilerDirectives;
66 #define MONO_PROFILER_LOADED_EVENT_MODULE 1
67 #define MONO_PROFILER_LOADED_EVENT_ASSEMBLY 2
68 #define MONO_PROFILER_LOADED_EVENT_APPDOMAIN 4
69 #define MONO_PROFILER_LOADED_EVENT_SUCCESS 8
70 #define MONO_PROFILER_LOADED_EVENT_FAILURE 16
72 typedef enum {
73 MONO_PROFILER_EVENT_DATA_TYPE_OTHER = 0,
74 MONO_PROFILER_EVENT_DATA_TYPE_METHOD = 1,
75 MONO_PROFILER_EVENT_DATA_TYPE_CLASS = 2
76 } MonoProfilerEventDataType;
78 typedef struct _ProfilerEventData {
79 union {
80 gpointer address;
81 gsize number;
82 } data;
83 unsigned int data_type:2;
84 unsigned int code:4;
85 unsigned int kind:1;
86 unsigned int value:25;
87 } ProfilerEventData;
89 #define EVENT_VALUE_BITS (25)
90 #define MAX_EVENT_VALUE ((1<<EVENT_VALUE_BITS)-1)
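/*
 * Illustrative sketch, not part of the original file: how an event record's
 * 25-bit "value" field can be filled without overflowing the bitfield.  The
 * helper name example_set_event_value is hypothetical; the full profiler
 * typically spills values that do not fit into an additional event slot.
 */
static inline void
example_set_event_value (ProfilerEventData *event, guint64 value) {
	/* Clamp to what fits in EVENT_VALUE_BITS (25 bits). */
	event->value = (value > MAX_EVENT_VALUE) ? MAX_EVENT_VALUE : (guint32) value;
}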
92 typedef enum {
93 MONO_PROFILER_EVENT_METHOD_JIT = 0,
94 MONO_PROFILER_EVENT_METHOD_FREED = 1,
95 MONO_PROFILER_EVENT_METHOD_CALL = 2,
96 MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER = 3,
97 MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER = 4
98 } MonoProfilerMethodEvents;
99 typedef enum {
100 MONO_PROFILER_EVENT_CLASS_LOAD = 0,
101 MONO_PROFILER_EVENT_CLASS_UNLOAD = 1,
102 MONO_PROFILER_EVENT_CLASS_EXCEPTION = 2,
103 MONO_PROFILER_EVENT_CLASS_MONITOR = 3,
104 MONO_PROFILER_EVENT_CLASS_ALLOCATION = 4
105 } MonoProfilerClassEvents;
106 typedef enum {
107 MONO_PROFILER_EVENT_RESULT_SUCCESS = 0,
108 MONO_PROFILER_EVENT_RESULT_FAILURE = 4
109 } MonoProfilerEventResult;
110 #define MONO_PROFILER_EVENT_RESULT_MASK MONO_PROFILER_EVENT_RESULT_FAILURE
111 typedef enum {
112 MONO_PROFILER_EVENT_THREAD = 1,
113 MONO_PROFILER_EVENT_GC_COLLECTION = 2,
114 MONO_PROFILER_EVENT_GC_MARK = 3,
115 MONO_PROFILER_EVENT_GC_SWEEP = 4,
116 MONO_PROFILER_EVENT_GC_RESIZE = 5,
117 MONO_PROFILER_EVENT_GC_STOP_WORLD = 6,
118 MONO_PROFILER_EVENT_GC_START_WORLD = 7,
119 MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION = 8,
120 MONO_PROFILER_EVENT_STACK_SECTION = 9,
121 MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID = 10,
122 MONO_PROFILER_EVENT_OBJECT_MONITOR = 11
123 } MonoProfilerEvents;
124 typedef enum {
125 MONO_PROFILER_EVENT_KIND_START = 0,
126 MONO_PROFILER_EVENT_KIND_END = 1
127 } MonoProfilerEventKind;
129 #define MONO_PROFILER_GET_CURRENT_TIME(t) do {\
130 struct timeval current_time;\
131 gettimeofday (&current_time, NULL);\
132 (t) = (((guint64)current_time.tv_sec) * 1000000) + current_time.tv_usec;\
133 } while (0)
135 static gboolean use_fast_timer = FALSE;
137 #if (defined(__i386__) || defined(__x86_64__)) && ! defined(HOST_WIN32)
139 #if defined(__i386__)
140 static const guchar cpuid_impl [] = {
141 0x55, /* push %ebp */
142 0x89, 0xe5, /* mov %esp,%ebp */
143 0x53, /* push %ebx */
144 0x8b, 0x45, 0x08, /* mov 0x8(%ebp),%eax */
145 0x0f, 0xa2, /* cpuid */
146 0x50, /* push %eax */
147 0x8b, 0x45, 0x10, /* mov 0x10(%ebp),%eax */
148 0x89, 0x18, /* mov %ebx,(%eax) */
149 0x8b, 0x45, 0x14, /* mov 0x14(%ebp),%eax */
150 0x89, 0x08, /* mov %ecx,(%eax) */
151 0x8b, 0x45, 0x18, /* mov 0x18(%ebp),%eax */
152 0x89, 0x10, /* mov %edx,(%eax) */
153 0x58, /* pop %eax */
154 0x8b, 0x55, 0x0c, /* mov 0xc(%ebp),%edx */
155 0x89, 0x02, /* mov %eax,(%edx) */
156 0x5b, /* pop %ebx */
157 0xc9, /* leave */
158 0xc3, /* ret */
159 };
161 typedef void (*CpuidFunc) (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx);
163 static int
164 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx) {
165 int have_cpuid = 0;
166 #ifndef _MSC_VER
167 __asm__ __volatile__ (
168 "pushfl\n"
169 "popl %%eax\n"
170 "movl %%eax, %%edx\n"
171 "xorl $0x200000, %%eax\n"
172 "pushl %%eax\n"
173 "popfl\n"
174 "pushfl\n"
175 "popl %%eax\n"
176 "xorl %%edx, %%eax\n"
177 "andl $0x200000, %%eax\n"
178 "movl %%eax, %0"
179 : "=r" (have_cpuid)
180 :
181 : "%eax", "%edx"
182 );
183 #else
184 __asm {
185 pushfd
186 pop eax
187 mov edx, eax
188 xor eax, 0x200000
189 push eax
190 popfd
191 pushfd
192 pop eax
193 xor eax, edx
194 and eax, 0x200000
195 mov have_cpuid, eax
196 }
197 #endif
198 if (have_cpuid) {
199 CpuidFunc func = (CpuidFunc) cpuid_impl;
200 func (id, p_eax, p_ebx, p_ecx, p_edx);
201 /*
202 * We use this approach because of issues with gcc and pic code, see:
203 * http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
204 __asm__ __volatile__ ("cpuid"
205 : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
206 : "a" (id));
207 */
208 return 1;
209 }
210 return 0;
211 }
213 static void detect_fast_timer (void) {
214 int p_eax, p_ebx, p_ecx, p_edx;
216 if (cpuid (0x1, &p_eax, &p_ebx, &p_ecx, &p_edx)) {
217 if (p_edx & 0x10) {
218 use_fast_timer = TRUE;
219 } else {
220 use_fast_timer = FALSE;
221 }
222 } else {
223 use_fast_timer = FALSE;
224 }
225 }
226 #endif
228 #if defined(__x86_64__)
229 static void detect_fast_timer (void) {
230 guint32 op = 0x1;
231 guint32 eax,ebx,ecx,edx;
232 __asm__ __volatile__ ("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(op));
233 if (edx & 0x10) {
234 use_fast_timer = TRUE;
235 } else {
236 use_fast_timer = FALSE;
237 }
238 }
239 #endif
241 static __inline__ guint64 rdtsc(void) {
242 guint32 hi, lo;
243 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
244 return ((guint64) lo) | (((guint64) hi) << 32);
245 }
246 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) do {\
247 if (use_fast_timer) {\
248 (c) = rdtsc ();\
249 } else {\
250 MONO_PROFILER_GET_CURRENT_TIME ((c));\
251 }\
252 } while (0)
253 #else
254 static void detect_fast_timer (void) {
255 use_fast_timer = FALSE;
256 }
257 #define MONO_PROFILER_GET_CURRENT_COUNTER(c) MONO_PROFILER_GET_CURRENT_TIME ((c))
258 #endif
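/*
 * Illustrative sketch, not part of the original file: the intended usage of
 * MONO_PROFILER_GET_CURRENT_COUNTER for timing a region of code.  Kept inside
 * "#if 0" because the system headers the slow path relies on (sys/time.h) are
 * only included further down in this file; the names example_measure_interval
 * and payload are hypothetical.
 */
#if 0
static guint64
example_measure_interval (void (*payload) (void)) {
	guint64 start, end;
	MONO_PROFILER_GET_CURRENT_COUNTER (start);
	payload ();
	MONO_PROFILER_GET_CURRENT_COUNTER (end);
	/* Ticks if use_fast_timer (rdtsc), microseconds otherwise. */
	return end - start;
}
#endif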
261 #define CLASS_LAYOUT_PACKED_BITMAP_SIZE 64
262 #define CLASS_LAYOUT_NOT_INITIALIZED (0xFFFF)
263 typedef enum {
264 HEAP_CODE_NONE = 0,
265 HEAP_CODE_OBJECT = 1,
266 HEAP_CODE_FREE_OBJECT_CLASS = 2,
267 HEAP_CODE_MASK = 3
268 } HeapProfilerJobValueCode;
269 typedef struct _MonoProfilerClassData {
270 union {
271 guint64 compact;
272 guint8 *extended;
273 } bitmap;
274 struct {
275 guint16 slots;
276 guint16 references;
277 } layout;
278 } MonoProfilerClassData;
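/*
 * Illustrative sketch, not part of the original file: how the layout bitmap in
 * MonoProfilerClassData is queried for a given pointer-sized slot.  Classes
 * with at most CLASS_LAYOUT_PACKED_BITMAP_SIZE slots keep the bitmap packed in
 * the guint64, larger ones use the heap-allocated byte array.  The helper name
 * example_slot_contains_reference is hypothetical.
 */
static inline gboolean
example_slot_contains_reference (MonoProfilerClassData *data, int slot) {
	if (data->layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
		return (data->bitmap.compact & (((guint64) 1) << slot)) != 0;
	} else {
		return (data->bitmap.extended [slot >> 3] & (1 << (slot & 7))) != 0;
	}
}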
280 typedef struct _MonoProfilerMethodData {
281 gpointer code_start;
282 guint32 code_size;
283 } MonoProfilerMethodData;
285 typedef struct _ClassIdMappingElement {
286 char *name;
287 guint32 id;
288 MonoClass *klass;
289 struct _ClassIdMappingElement *next_unwritten;
290 MonoProfilerClassData data;
291 } ClassIdMappingElement;
293 typedef struct _MethodIdMappingElement {
294 char *name;
295 guint32 id;
296 MonoMethod *method;
297 struct _MethodIdMappingElement *next_unwritten;
298 MonoProfilerMethodData data;
299 } MethodIdMappingElement;
301 typedef struct _ClassIdMapping {
302 GHashTable *table;
303 ClassIdMappingElement *unwritten;
304 guint32 next_id;
305 } ClassIdMapping;
307 typedef struct _MethodIdMapping {
308 GHashTable *table;
309 MethodIdMappingElement *unwritten;
310 guint32 next_id;
311 } MethodIdMapping;
313 typedef struct _LoadedElement {
314 char *name;
315 guint64 load_start_counter;
316 guint64 load_end_counter;
317 guint64 unload_start_counter;
318 guint64 unload_end_counter;
319 guint32 id;
320 guint8 loaded;
321 guint8 load_written;
322 guint8 unloaded;
323 guint8 unload_written;
324 } LoadedElement;
325 struct _ProfilerCodeBufferArray;
326 typedef struct _ProfilerCodeBuffer {
327 gpointer start;
328 gpointer end;
329 struct {
330 union {
331 MonoMethod *method;
332 MonoClass *klass;
333 void *data;
334 struct _ProfilerCodeBufferArray *sub_buffers;
335 } data;
336 guint16 value;
337 guint16 type;
338 } info;
339 } ProfilerCodeBuffer;
341 #define PROFILER_CODE_BUFFER_ARRAY_SIZE 64
342 typedef struct _ProfilerCodeBufferArray {
343 int level;
344 int number_of_buffers;
345 ProfilerCodeBuffer buffers [PROFILER_CODE_BUFFER_ARRAY_SIZE];
346 } ProfilerCodeBufferArray;
348 typedef struct _ProfilerCodeChunk {
349 gpointer start;
350 gpointer end;
351 gboolean destroyed;
352 ProfilerCodeBufferArray *buffers;
353 } ProfilerCodeChunk;
355 typedef struct _ProfilerCodeChunks {
356 int capacity;
357 int number_of_chunks;
358 ProfilerCodeChunk *chunks;
359 } ProfilerCodeChunks;
362 #define PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE 1024
363 #define PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE 4096
364 #define PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE 4096
366 typedef struct _ProfilerHeapShotObjectBuffer {
367 struct _ProfilerHeapShotObjectBuffer *next;
368 MonoObject **next_free_slot;
369 MonoObject **end;
370 MonoObject **first_unprocessed_slot;
371 MonoObject *buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE];
372 } ProfilerHeapShotObjectBuffer;
374 typedef struct _ProfilerHeapShotHeapBuffer {
375 struct _ProfilerHeapShotHeapBuffer *next;
376 struct _ProfilerHeapShotHeapBuffer *previous;
377 MonoObject **start_slot;
378 MonoObject **end_slot;
379 MonoObject *buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE];
380 } ProfilerHeapShotHeapBuffer;
382 typedef struct _ProfilerHeapShotHeapBuffers {
383 ProfilerHeapShotHeapBuffer *buffers;
384 ProfilerHeapShotHeapBuffer *last;
385 ProfilerHeapShotHeapBuffer *current;
386 MonoObject **first_free_slot;
387 } ProfilerHeapShotHeapBuffers;
390 typedef struct _ProfilerHeapShotWriteBuffer {
391 struct _ProfilerHeapShotWriteBuffer *next;
392 gpointer buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE];
393 } ProfilerHeapShotWriteBuffer;
395 typedef struct _ProfilerHeapShotClassSummary {
396 struct {
397 guint32 instances;
398 guint32 bytes;
399 } reachable;
400 struct {
401 guint32 instances;
402 guint32 bytes;
403 } unreachable;
404 } ProfilerHeapShotClassSummary;
406 typedef struct _ProfilerHeapShotCollectionSummary {
407 ProfilerHeapShotClassSummary *per_class_data;
408 guint32 capacity;
409 } ProfilerHeapShotCollectionSummary;
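/*
 * Illustrative sketch, not part of the original file: how one object would be
 * accounted in a collection summary while scanning the heap.  The helper name
 * example_count_object is hypothetical; the real accounting happens in the
 * heap scanning code further down in the file.
 */
static inline void
example_count_object (ProfilerHeapShotCollectionSummary *summary, guint32 class_id, guint32 size, gboolean is_reachable) {
	if (class_id >= summary->capacity)
		return; /* class id allocated after the summary was sized */
	if (is_reachable) {
		summary->per_class_data [class_id].reachable.instances ++;
		summary->per_class_data [class_id].reachable.bytes += size;
	} else {
		summary->per_class_data [class_id].unreachable.instances ++;
		summary->per_class_data [class_id].unreachable.bytes += size;
	}
}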
411 typedef struct _ProfilerHeapShotWriteJob {
412 struct _ProfilerHeapShotWriteJob *next;
413 struct _ProfilerHeapShotWriteJob *next_unwritten;
414 gpointer *start;
415 gpointer *cursor;
416 gpointer *end;
417 ProfilerHeapShotWriteBuffer *buffers;
418 ProfilerHeapShotWriteBuffer **last_next;
419 guint32 full_buffers;
420 gboolean heap_shot_was_requested;
421 guint64 start_counter;
422 guint64 start_time;
423 guint64 end_counter;
424 guint64 end_time;
425 guint32 collection;
426 ProfilerHeapShotCollectionSummary summary;
427 gboolean dump_heap_data;
428 } ProfilerHeapShotWriteJob;
430 typedef struct _ProfilerThreadStack {
431 guint32 capacity;
432 guint32 top;
433 guint32 last_saved_top;
434 guint32 last_written_frame;
435 MonoMethod **stack;
436 guint8 *method_is_jitted;
437 guint32 *written_frames;
438 } ProfilerThreadStack;
440 typedef struct _ProfilerPerThreadData {
441 ProfilerEventData *events;
442 ProfilerEventData *next_free_event;
443 ProfilerEventData *next_unreserved_event;
444 ProfilerEventData *end_event;
445 ProfilerEventData *first_unwritten_event;
446 ProfilerEventData *first_unmapped_event;
447 guint64 start_event_counter;
448 guint64 last_event_counter;
449 gsize thread_id;
450 ProfilerHeapShotObjectBuffer *heap_shot_object_buffers;
451 ProfilerThreadStack stack;
452 struct _ProfilerPerThreadData* next;
453 } ProfilerPerThreadData;
455 typedef struct _ProfilerStatisticalHit {
456 gpointer *address;
457 MonoDomain *domain;
458 } ProfilerStatisticalHit;
460 typedef struct _ProfilerStatisticalData {
461 ProfilerStatisticalHit *hits;
462 unsigned int next_free_index;
463 unsigned int end_index;
464 unsigned int first_unwritten_index;
465 } ProfilerStatisticalData;
467 typedef struct _ProfilerUnmanagedSymbol {
468 guint32 offset;
469 guint32 size;
470 guint32 id;
471 guint32 index;
472 } ProfilerUnmanagedSymbol;
474 struct _ProfilerExecutableFile;
475 struct _ProfilerExecutableFileSectionRegion;
477 typedef struct _ProfilerExecutableMemoryRegionData {
478 gpointer start;
479 gpointer end;
480 guint32 file_offset;
481 char *file_name;
482 guint32 id;
483 gboolean is_new;
485 struct _ProfilerExecutableFile *file;
486 struct _ProfilerExecutableFileSectionRegion *file_region_reference;
487 guint32 symbols_count;
488 guint32 symbols_capacity;
489 ProfilerUnmanagedSymbol *symbols;
490 } ProfilerExecutableMemoryRegionData;
492 typedef struct _ProfilerExecutableMemoryRegions {
493 ProfilerExecutableMemoryRegionData **regions;
494 guint32 regions_capacity;
495 guint32 regions_count;
496 guint32 next_id;
497 guint32 next_unmanaged_function_id;
498 } ProfilerExecutableMemoryRegions;
500 /* Start of ELF definitions */
501 #define EI_NIDENT 16
502 typedef guint16 ElfHalf;
503 typedef guint32 ElfWord;
504 typedef gsize ElfAddr;
505 typedef gsize ElfOff;
507 typedef struct {
508 unsigned char e_ident[EI_NIDENT];
509 ElfHalf e_type;
510 ElfHalf e_machine;
511 ElfWord e_version;
512 ElfAddr e_entry;
513 ElfOff e_phoff;
514 ElfOff e_shoff; // Section header table
515 ElfWord e_flags;
516 ElfHalf e_ehsize; // Header size
517 ElfHalf e_phentsize;
518 ElfHalf e_phnum;
519 ElfHalf e_shentsize; // Section header entry size
520 ElfHalf e_shnum; // Section header entries number
521 ElfHalf e_shstrndx; // String table index
522 } ElfHeader;
524 #if (SIZEOF_VOID_P == 4)
525 typedef struct {
526 ElfWord sh_name;
527 ElfWord sh_type;
528 ElfWord sh_flags;
529 ElfAddr sh_addr; // Address in memory
530 ElfOff sh_offset; // Offset in file
531 ElfWord sh_size;
532 ElfWord sh_link;
533 ElfWord sh_info;
534 ElfWord sh_addralign;
535 ElfWord sh_entsize;
536 } ElfSection;
537 typedef struct {
538 ElfWord st_name;
539 ElfAddr st_value;
540 ElfWord st_size;
541 unsigned char st_info; // Use ELF32_ST_TYPE to get symbol type
542 unsigned char st_other;
543 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
544 } ElfSymbol;
545 #elif (SIZEOF_VOID_P == 8)
546 typedef struct {
547 ElfWord sh_name;
548 ElfWord sh_type;
549 ElfOff sh_flags;
550 ElfAddr sh_addr; // Address in memory
551 ElfOff sh_offset; // Offset in file
552 ElfOff sh_size;
553 ElfWord sh_link;
554 ElfWord sh_info;
555 ElfOff sh_addralign;
556 ElfOff sh_entsize;
557 } ElfSection;
558 typedef struct {
559 ElfWord st_name;
560 unsigned char st_info; // Use ELF_ST_TYPE to get symbol type
561 unsigned char st_other;
562 ElfHalf st_shndx; // Or one of SHN_ABS, SHN_COMMON or SHN_UNDEF.
563 ElfAddr st_value;
564 ElfAddr st_size;
565 } ElfSymbol;
566 #else
567 #error Bad size of void pointer
568 #endif
571 #define ELF_ST_BIND(i) ((i)>>4)
572 #define ELF_ST_TYPE(i) ((i)&0xf)
575 typedef enum {
576 EI_MAG0 = 0,
577 EI_MAG1 = 1,
578 EI_MAG2 = 2,
579 EI_MAG3 = 3,
580 EI_CLASS = 4,
581 EI_DATA = 5
582 } ElfIdentFields;
584 typedef enum {
585 ELF_FILE_TYPE_NONE = 0,
586 ELF_FILE_TYPE_REL = 1,
587 ELF_FILE_TYPE_EXEC = 2,
588 ELF_FILE_TYPE_DYN = 3,
589 ELF_FILE_TYPE_CORE = 4
590 } ElfFileType;
592 typedef enum {
593 ELF_CLASS_NONE = 0,
594 ELF_CLASS_32 = 1,
595 ELF_CLASS_64 = 2
596 } ElfIdentClass;
598 typedef enum {
599 ELF_DATA_NONE = 0,
600 ELF_DATA_LSB = 1,
601 ELF_DATA_MSB = 2
602 } ElfIdentData;
604 typedef enum {
605 ELF_SHT_NULL = 0,
606 ELF_SHT_PROGBITS = 1,
607 ELF_SHT_SYMTAB = 2,
608 ELF_SHT_STRTAB = 3,
609 ELF_SHT_RELA = 4,
610 ELF_SHT_HASH = 5,
611 ELF_SHT_DYNAMIC = 6,
612 ELF_SHT_NOTE = 7,
613 ELF_SHT_NOBITS = 8,
614 ELF_SHT_REL = 9,
615 ELF_SHT_SHLIB = 10,
616 ELF_SHT_DYNSYM = 11
617 } ElfSectionType;
619 typedef enum {
620 ELF_STT_NOTYPE = 0,
621 ELF_STT_OBJECT = 1,
622 ELF_STT_FUNC = 2,
623 ELF_STT_SECTION = 3,
624 ELF_STT_FILE = 4
625 } ElfSymbolType;
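/*
 * Illustrative sketch, not part of the original file: how a symbol table entry
 * read from an ELF file is classified with the macros and enums above.  The
 * helper name example_symbol_is_function is hypothetical.
 */
static inline gboolean
example_symbol_is_function (const ElfSymbol *symbol) {
	/* The low nibble of st_info carries the ElfSymbolType. */
	return ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC;
}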
627 typedef enum {
628 ELF_SHF_WRITE = 1,
629 ELF_SHF_ALLOC = 2,
630 ELF_SHF_EXECINSTR = 4,
631 } ElfSectionFlags;
633 #define ELF_SHN_UNDEF 0
634 #define ELF_SHN_LORESERVE 0xff00
635 #define ELF_SHN_LOPROC 0xff00
636 #define ELF_SHN_HIPROC 0xff1f
637 #define ELF_SHN_ABS 0xfff1
638 #define ELF_SHN_COMMON 0xfff2
639 #define ELF_SHN_HIRESERVE 0xffff
640 /* End of ELF definitions */
642 typedef struct _ProfilerExecutableFileSectionRegion {
643 ProfilerExecutableMemoryRegionData *region;
644 guint8 *section_address;
645 gsize section_offset;
646 } ProfilerExecutableFileSectionRegion;
648 typedef struct _ProfilerExecutableFile {
649 guint32 reference_count;
651 /* Used for mmap and munmap */
652 int fd;
653 guint8 *data;
654 size_t length;
656 /* File data */
657 ElfHeader *header;
658 guint8 *symbols_start;
659 guint32 symbols_count;
660 guint32 symbol_size;
661 const char *symbols_string_table;
662 const char *main_string_table;
664 ProfilerExecutableFileSectionRegion *section_regions;
666 struct _ProfilerExecutableFile *next_new_file;
667 } ProfilerExecutableFile;
669 typedef struct _ProfilerExecutableFiles {
670 GHashTable *table;
671 ProfilerExecutableFile *new_files;
672 } ProfilerExecutableFiles;
675 #define CLEANUP_WRITER_THREAD() do {profiler->writer_thread_terminated = TRUE;} while (0)
676 #define CHECK_WRITER_THREAD() (! profiler->writer_thread_terminated)
678 #ifndef HOST_WIN32
679 #include <sys/types.h>
680 #include <sys/time.h>
681 #include <sys/stat.h>
682 #include <unistd.h>
683 #include <fcntl.h>
684 #include <pthread.h>
685 #include <semaphore.h>
687 #include <sys/mman.h>
688 #include <sys/types.h>
689 #include <sys/stat.h>
690 #include <unistd.h>
691 #include <errno.h>
693 #define MUTEX_TYPE pthread_mutex_t
694 #define INITIALIZE_PROFILER_MUTEX() pthread_mutex_init (&(profiler->mutex), NULL)
695 #define DELETE_PROFILER_MUTEX() pthread_mutex_destroy (&(profiler->mutex))
696 #define LOCK_PROFILER() do {/*LOG_WRITER_THREAD ("LOCK_PROFILER");*/ pthread_mutex_lock (&(profiler->mutex));} while (0)
697 #define UNLOCK_PROFILER() do {/*LOG_WRITER_THREAD ("UNLOCK_PROFILER");*/ pthread_mutex_unlock (&(profiler->mutex));} while (0)
699 #define THREAD_TYPE pthread_t
700 #define CREATE_WRITER_THREAD(f) pthread_create (&(profiler->data_writer_thread), NULL, ((void*(*)(void*))f), NULL)
701 #define CREATE_USER_THREAD(f) pthread_create (&(profiler->user_thread), NULL, ((void*(*)(void*))f), NULL)
702 #define EXIT_THREAD() pthread_exit (NULL);
703 #define WAIT_WRITER_THREAD() do {\
704 if (CHECK_WRITER_THREAD ()) {\
705 pthread_join (profiler->data_writer_thread, NULL);\
706 }\
707 } while (0)
708 #define CURRENT_THREAD_ID() (gsize) pthread_self ()
710 #ifndef HAVE_KW_THREAD
711 static pthread_key_t pthread_profiler_key;
712 static pthread_once_t profiler_pthread_once = PTHREAD_ONCE_INIT;
713 static void
714 make_pthread_profiler_key (void) {
715 (void) pthread_key_create (&pthread_profiler_key, NULL);
717 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) pthread_getspecific (pthread_profiler_key))
718 #define SET_PROFILER_THREAD_DATA(x) (void) pthread_setspecific (pthread_profiler_key, (x))
719 #define ALLOCATE_PROFILER_THREAD_DATA() (void) pthread_once (&profiler_pthread_once, make_pthread_profiler_key)
720 #define FREE_PROFILER_THREAD_DATA() (void) pthread_key_delete (pthread_profiler_key)
721 #endif
723 #define EVENT_TYPE sem_t
724 #define WRITER_EVENT_INIT() do {\
725 sem_init (&(profiler->enable_data_writer_event), 0, 0);\
726 sem_init (&(profiler->wake_data_writer_event), 0, 0);\
727 sem_init (&(profiler->done_data_writer_event), 0, 0);\
728 } while (0)
729 #define WRITER_EVENT_DESTROY() do {\
730 sem_destroy (&(profiler->enable_data_writer_event));\
731 sem_destroy (&(profiler->wake_data_writer_event));\
732 sem_destroy (&(profiler->done_data_writer_event));\
733 } while (0)
734 #define WRITER_EVENT_WAIT() (void) sem_wait (&(profiler->wake_data_writer_event))
735 #define WRITER_EVENT_RAISE() (void) sem_post (&(profiler->wake_data_writer_event))
736 #define WRITER_EVENT_ENABLE_WAIT() (void) sem_wait (&(profiler->enable_data_writer_event))
737 #define WRITER_EVENT_ENABLE_RAISE() (void) sem_post (&(profiler->enable_data_writer_event))
738 #define WRITER_EVENT_DONE_WAIT() do {\
739 if (CHECK_WRITER_THREAD ()) {\
740 (void) sem_wait (&(profiler->done_data_writer_event));\
741 }\
742 } while (0)
743 #define WRITER_EVENT_DONE_RAISE() (void) sem_post (&(profiler->done_data_writer_event))
745 #if 0
746 #define FILE_HANDLE_TYPE FILE*
747 #define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
748 #define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
749 #define FLUSH_FILE() fflush (profiler->file)
750 #define CLOSE_FILE() fclose (profiler->file)
751 #else
752 #define FILE_HANDLE_TYPE int
753 #define OPEN_FILE() profiler->file = open (profiler->file_name, O_WRONLY|O_CREAT|O_TRUNC, 0664);
754 #define WRITE_BUFFER(b,s) write (profiler->file, (b), (s))
755 #define FLUSH_FILE() fsync (profiler->file)
756 #define CLOSE_FILE() close (profiler->file)
757 #endif
759 #else
761 #include <windows.h>
763 #define MUTEX_TYPE CRITICAL_SECTION
764 #define INITIALIZE_PROFILER_MUTEX() InitializeCriticalSection (&(profiler->mutex))
765 #define DELETE_PROFILER_MUTEX() DeleteCriticalSection (&(profiler->mutex))
766 #define LOCK_PROFILER() EnterCriticalSection (&(profiler->mutex))
767 #define UNLOCK_PROFILER() LeaveCriticalSection (&(profiler->mutex))
769 #define THREAD_TYPE HANDLE
770 #define CREATE_WRITER_THREAD(f) CreateThread (NULL, (1*1024*1024), (f), NULL, 0, NULL);
771 #define EXIT_THREAD() ExitThread (0);
772 #define WAIT_WRITER_THREAD() do {\
773 if (CHECK_WRITER_THREAD ()) {\
774 WaitForSingleObject (profiler->data_writer_thread, INFINITE);\
775 }\
776 } while (0)
777 #define CURRENT_THREAD_ID() (gsize) GetCurrentThreadId ()
779 #ifndef HAVE_KW_THREAD
780 static guint32 profiler_thread_id = -1;
781 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*)TlsGetValue (profiler_thread_id))
782 #define SET_PROFILER_THREAD_DATA(x) TlsSetValue (profiler_thread_id, (x));
783 #define ALLOCATE_PROFILER_THREAD_DATA() profiler_thread_id = TlsAlloc ()
784 #define FREE_PROFILER_THREAD_DATA() TlsFree (profiler_thread_id)
785 #endif
787 #define EVENT_TYPE HANDLE
788 #define WRITER_EVENT_INIT() do {\
789 profiler->enable_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
790 profiler->wake_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
791 profiler->done_data_writer_event = CreateEvent (NULL, FALSE, FALSE, NULL);\
792 } while (0)
793 #define WRITER_EVENT_DESTROY() do {\
795 CloseHandle (profiler->enable_data_writer_event);\
796 CloseHandle (profiler->wake_data_writer_event);\
797 CloseHandle (profiler->done_data_writer_event);\
798 } while (0)
799 #define WRITER_EVENT_WAIT() WaitForSingleObject (profiler->wake_data_writer_event, INFINITE)
800 #define WRITER_EVENT_RAISE() SetEvent (profiler->wake_data_writer_event)
801 #define WRITER_EVENT_ENABLE_WAIT() WaitForSingleObject (profiler->enable_data_writer_event, INFINITE)
802 #define WRITER_EVENT_ENABLE_RAISE() SetEvent (profiler->enable_data_writer_event)
803 #define WRITER_EVENT_DONE_WAIT() do {\
804 if (CHECK_WRITER_THREAD ()) {\
805 WaitForSingleObject (profiler->done_data_writer_event, INFINITE);\
806 }\
807 } while (0)
808 #define WRITER_EVENT_DONE_RAISE() SetEvent (profiler->done_data_writer_event)
810 #define FILE_HANDLE_TYPE FILE*
811 #define OPEN_FILE() profiler->file = fopen (profiler->file_name, "wb");
812 #define WRITE_BUFFER(b,s) fwrite ((b), 1, (s), profiler->file)
813 #define FLUSH_FILE() fflush (profiler->file)
814 #define CLOSE_FILE() fclose (profiler->file);
816 #endif
818 #ifdef HAVE_KW_THREAD
819 static __thread ProfilerPerThreadData * tls_profiler_per_thread_data;
820 #define LOOKUP_PROFILER_THREAD_DATA() ((ProfilerPerThreadData*) tls_profiler_per_thread_data)
821 #define SET_PROFILER_THREAD_DATA(x) tls_profiler_per_thread_data = (x)
822 #define ALLOCATE_PROFILER_THREAD_DATA() /* nop */
823 #define FREE_PROFILER_THREAD_DATA() /* nop */
824 #endif
826 #define GET_PROFILER_THREAD_DATA(data) do {\
827 ProfilerPerThreadData *_result = LOOKUP_PROFILER_THREAD_DATA ();\
828 if (!_result) {\
829 _result = profiler_per_thread_data_new (profiler->per_thread_buffer_size);\
830 LOCK_PROFILER ();\
831 _result->next = profiler->per_thread_data;\
832 profiler->per_thread_data = _result;\
833 UNLOCK_PROFILER ();\
834 SET_PROFILER_THREAD_DATA (_result);\
835 }\
836 (data) = _result;\
837 } while (0)
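/*
 * Illustrative sketch, not part of the original file: the usage pattern an
 * event handler follows to get (and lazily create) its per-thread data.  Kept
 * inside "#if 0" because profiler_per_thread_data_new and the profiler global
 * are only defined further down in the file; example_event_handler is a
 * hypothetical name.
 */
#if 0
static void
example_event_handler (void) {
	ProfilerPerThreadData *data;
	GET_PROFILER_THREAD_DATA (data);
	/* ... record events into data->next_free_event ... */
}
#endif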
839 #define PROFILER_FILE_WRITE_BUFFER_SIZE (profiler->write_buffer_size)
840 typedef struct _ProfilerFileWriteBuffer {
841 struct _ProfilerFileWriteBuffer *next;
842 guint8 buffer [MONO_ZERO_LEN_ARRAY];
843 } ProfilerFileWriteBuffer;
845 #define CHECK_PROFILER_ENABLED() do {\
846 if (! profiler->profiler_enabled)\
847 return;\
848 } while (0)
849 struct _MonoProfiler {
850 MUTEX_TYPE mutex;
852 MonoProfileFlags flags;
853 gboolean profiler_enabled;
854 char *file_name;
855 char *file_name_suffix;
856 FILE_HANDLE_TYPE file;
858 guint64 start_time;
859 guint64 start_counter;
860 guint64 end_time;
861 guint64 end_counter;
863 guint64 last_header_counter;
865 MethodIdMapping *methods;
866 ClassIdMapping *classes;
868 guint32 loaded_element_next_free_id;
869 GHashTable *loaded_assemblies;
870 GHashTable *loaded_modules;
871 GHashTable *loaded_appdomains;
873 guint32 per_thread_buffer_size;
874 guint32 statistical_buffer_size;
875 ProfilerPerThreadData* per_thread_data;
876 ProfilerStatisticalData *statistical_data;
877 ProfilerStatisticalData *statistical_data_ready;
878 ProfilerStatisticalData *statistical_data_second_buffer;
879 int statistical_call_chain_depth;
880 MonoProfilerCallChainStrategy statistical_call_chain_strategy;
882 ProfilerCodeChunks code_chunks;
884 THREAD_TYPE data_writer_thread;
885 THREAD_TYPE user_thread;
886 EVENT_TYPE enable_data_writer_event;
887 EVENT_TYPE wake_data_writer_event;
888 EVENT_TYPE done_data_writer_event;
889 gboolean terminate_writer_thread;
890 gboolean writer_thread_terminated;
892 ProfilerFileWriteBuffer *write_buffers;
893 ProfilerFileWriteBuffer *current_write_buffer;
894 int write_buffer_size;
895 int current_write_position;
896 int full_write_buffers;
898 ProfilerHeapShotWriteJob *heap_shot_write_jobs;
899 ProfilerHeapShotHeapBuffers heap;
901 int command_port;
903 int dump_next_heap_snapshots;
904 gboolean heap_shot_was_requested;
905 guint32 garbage_collection_counter;
907 ProfilerExecutableMemoryRegions *executable_regions;
908 ProfilerExecutableFiles executable_files;
910 struct {
911 #if (HAS_OPROFILE)
912 gboolean oprofile;
913 #endif
914 gboolean jit_time;
915 gboolean unreachable_objects;
916 gboolean collection_summary;
917 gboolean report_gc_events;
918 gboolean heap_shot;
919 gboolean track_stack;
920 gboolean track_calls;
921 gboolean save_allocation_caller;
922 gboolean save_allocation_stack;
923 gboolean allocations_carry_id;
924 } action_flags;
925 };
926 static MonoProfiler *profiler;
928 static void
929 enable_profiler (void) {
930 profiler->profiler_enabled = TRUE;
933 static void flush_everything (void);
935 static void
936 disable_profiler (void) {
937 profiler->profiler_enabled = FALSE;
938 flush_everything ();
941 static void
942 request_heap_snapshot (void) {
943 profiler->heap_shot_was_requested = TRUE;
944 mono_gc_collect (mono_gc_max_generation ());
947 #define DEBUG_LOAD_EVENTS 0
948 #define DEBUG_MAPPING_EVENTS 0
949 #define DEBUG_LOGGING_PROFILER 0
950 #define DEBUG_HEAP_PROFILER 0
951 #define DEBUG_CLASS_BITMAPS 0
952 #define DEBUG_STATISTICAL_PROFILER 0
953 #define DEBUG_WRITER_THREAD 0
954 #define DEBUG_USER_THREAD 0
955 #define DEBUG_FILE_WRITES 0
956 #if (DEBUG_LOGGING_PROFILER || DEBUG_STATISTICAL_PROFILER || DEBUG_HEAP_PROFILER || DEBUG_WRITER_THREAD || DEBUG_FILE_WRITES)
957 #define LOG_WRITER_THREAD(m) printf ("WRITER-THREAD-LOG %s\n", m)
958 #else
959 #define LOG_WRITER_THREAD(m)
960 #endif
961 #if (DEBUG_LOGGING_PROFILER || DEBUG_STATISTICAL_PROFILER || DEBUG_HEAP_PROFILER || DEBUG_USER_THREAD || DEBUG_FILE_WRITES)
962 #define LOG_USER_THREAD(m) printf ("USER-THREAD-LOG %s\n", m)
963 #else
964 #define LOG_USER_THREAD(m)
965 #endif
967 #if DEBUG_LOGGING_PROFILER
968 static int event_counter = 0;
969 #define EVENT_MARK() printf ("[EVENT:%d]", ++ event_counter)
970 #endif
972 static void
973 thread_stack_initialize_empty (ProfilerThreadStack *stack) {
974 stack->capacity = 0;
975 stack->top = 0;
976 stack->last_saved_top = 0;
977 stack->last_written_frame = 0;
978 stack->stack = NULL;
979 stack->method_is_jitted = NULL;
980 stack->written_frames = NULL;
983 static void
984 thread_stack_free (ProfilerThreadStack *stack) {
985 stack->capacity = 0;
986 stack->top = 0;
987 stack->last_saved_top = 0;
988 stack->last_written_frame = 0;
989 if (stack->stack != NULL) {
990 g_free (stack->stack);
991 stack->stack = NULL;
993 if (stack->method_is_jitted != NULL) {
994 g_free (stack->method_is_jitted);
995 stack->method_is_jitted = NULL;
997 if (stack->written_frames != NULL) {
998 g_free (stack->written_frames);
999 stack->written_frames = NULL;
1003 static void
1004 thread_stack_initialize (ProfilerThreadStack *stack, guint32 capacity) {
1005 stack->capacity = capacity;
1006 stack->top = 0;
1007 stack->last_saved_top = 0;
1008 stack->last_written_frame = 0;
1009 stack->stack = g_new0 (MonoMethod*, capacity);
1010 stack->method_is_jitted = g_new0 (guint8, capacity);
1011 stack->written_frames = g_new0 (guint32, capacity);
1014 static void
1015 thread_stack_push_jitted (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
1016 if (stack->top >= stack->capacity) {
1017 MonoMethod **old_stack = stack->stack;
1018 guint8 *old_method_is_jitted = stack->method_is_jitted;
1019 guint32 *old_written_frames = stack->written_frames;
1020 guint32 top = stack->top;
1021 guint32 last_saved_top = stack->last_saved_top;
1022 guint32 last_written_frame = stack->last_written_frame;
1023 thread_stack_initialize (stack, stack->capacity * 2);
1024 memcpy (stack->stack, old_stack, top * sizeof (MonoMethod*));
1025 memcpy (stack->method_is_jitted, old_method_is_jitted, top * sizeof (guint8));
1026 memcpy (stack->written_frames, old_written_frames, top * sizeof (guint32));
1027 g_free (old_stack);
1028 g_free (old_method_is_jitted);
1029 g_free (old_written_frames);
1030 stack->top = top;
1031 stack->last_saved_top = last_saved_top;
1032 stack->last_written_frame = last_written_frame;
1034 stack->stack [stack->top] = method;
1035 stack->method_is_jitted [stack->top] = method_is_jitted;
1036 stack->top ++;
1039 static inline void
1040 thread_stack_push (ProfilerThreadStack *stack, MonoMethod* method) {
1041 thread_stack_push_jitted (stack, method, FALSE);
1044 static MonoMethod*
1045 thread_stack_pop (ProfilerThreadStack *stack) {
1046 if (stack->top > 0) {
1047 stack->top --;
1048 if (stack->last_saved_top > stack->top) {
1049 stack->last_saved_top = stack->top;
1051 return stack->stack [stack->top];
1052 } else {
1053 return NULL;
1057 static MonoMethod*
1058 thread_stack_top (ProfilerThreadStack *stack) {
1059 if (stack->top > 0) {
1060 return stack->stack [stack->top - 1];
1061 } else {
1062 return NULL;
1066 static gboolean
1067 thread_stack_top_is_jitted (ProfilerThreadStack *stack) {
1068 if (stack->top > 0) {
1069 return stack->method_is_jitted [stack->top - 1];
1070 } else {
1071 return FALSE;
1075 static MonoMethod*
1076 thread_stack_index_from_top (ProfilerThreadStack *stack, int index) {
1077 if (stack->top > index) {
1078 return stack->stack [stack->top - (index + 1)];
1079 } else {
1080 return NULL;
1084 static gboolean
1085 thread_stack_index_from_top_is_jitted (ProfilerThreadStack *stack, int index) {
1086 if (stack->top > index) {
1087 return stack->method_is_jitted [stack->top - (index + 1)];
1088 } else {
1089 return FALSE;
1093 static inline void
1094 thread_stack_push_safely (ProfilerThreadStack *stack, MonoMethod* method) {
1095 if (stack->stack != NULL) {
1096 thread_stack_push (stack, method);
1100 static inline void
1101 thread_stack_push_jitted_safely (ProfilerThreadStack *stack, MonoMethod* method, gboolean method_is_jitted) {
1102 if (stack->stack != NULL) {
1103 thread_stack_push_jitted (stack, method, method_is_jitted);
1107 static inline int
1108 thread_stack_count_unsaved_frames (ProfilerThreadStack *stack) {
1109 int result = stack->top - stack->last_saved_top;
1110 return (result > 0) ? result : 0;
1113 static inline int
1114 thread_stack_get_last_written_frame (ProfilerThreadStack *stack) {
1115 return stack->last_written_frame;
1118 static inline void
1119 thread_stack_set_last_written_frame (ProfilerThreadStack *stack, int last_written_frame) {
1120 stack->last_written_frame = last_written_frame;
1123 static inline guint32
1124 thread_stack_written_frame_at_index (ProfilerThreadStack *stack, int index) {
1125 return stack->written_frames [index];
1128 static inline void
1129 thread_stack_write_frame_at_index (ProfilerThreadStack *stack, int index, guint32 method_id_and_is_jitted) {
1130 stack->written_frames [index] = method_id_and_is_jitted;
1131 }
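/*
 * Illustrative sketch, not part of the original file: how the shadow stack
 * helpers above are used around a method invocation.  The helper name
 * example_trace_call is hypothetical; the "safely" variant is a no-op until
 * the stack has been given storage with thread_stack_initialize.
 */
static inline void
example_trace_call (ProfilerThreadStack *stack, MonoMethod *method) {
	thread_stack_push_safely (stack, method); /* method enter */
	/* ... the callee runs here ... */
	thread_stack_pop (stack);                 /* method leave */
}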
1133 static ClassIdMappingElement*
1134 class_id_mapping_element_get (MonoClass *klass) {
1135 return g_hash_table_lookup (profiler->classes->table, (gconstpointer) klass);
1138 static MethodIdMappingElement*
1139 method_id_mapping_element_get (MonoMethod *method) {
1140 return g_hash_table_lookup (profiler->methods->table, (gconstpointer) method);
1143 #define BITS_TO_BYTES(v) do {\
1144 (v) += 7;\
1145 (v) &= ~7;\
1146 (v) >>= 3;\
1147 } while (0)
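/*
 * Illustrative sketch, not part of the original file: BITS_TO_BYTES rounds a
 * bit count up to whole bytes in place, e.g. 25 bits -> 4 bytes.  The helper
 * name example_bitmap_bytes is hypothetical.
 */
static inline int
example_bitmap_bytes (int bits) {
	int bytes = bits;       /* e.g. 25 */
	BITS_TO_BYTES (bytes);  /* += 7 -> 32, &= ~7 -> 32, >>= 3 -> 4 */
	return bytes;
}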
1149 static ClassIdMappingElement*
1150 class_id_mapping_element_new (MonoClass *klass) {
1151 ClassIdMappingElement *result = g_new (ClassIdMappingElement, 1);
1153 result->name = mono_type_full_name (mono_class_get_type (klass));
1154 result->klass = klass;
1155 result->next_unwritten = profiler->classes->unwritten;
1156 profiler->classes->unwritten = result;
1157 result->id = profiler->classes->next_id;
1158 profiler->classes->next_id ++;
1160 result->data.bitmap.compact = 0;
1161 result->data.layout.slots = CLASS_LAYOUT_NOT_INITIALIZED;
1162 result->data.layout.references = CLASS_LAYOUT_NOT_INITIALIZED;
1164 g_hash_table_insert (profiler->classes->table, klass, result);
1166 #if (DEBUG_MAPPING_EVENTS)
1167 printf ("Created new CLASS mapping element \"%s\" (%p)[%d]\n", result->name, klass, result->id);
1168 #endif
1169 return result;
1172 static void
1173 class_id_mapping_element_build_layout_bitmap (MonoClass *klass, ClassIdMappingElement *klass_id) {
1174 MonoClass *parent_class = mono_class_get_parent (klass);
1175 int number_of_reference_fields = 0;
1176 int max_offset_of_reference_fields = 0;
1177 ClassIdMappingElement *parent_id;
1178 gpointer iter;
1179 MonoClassField *field;
1181 #if (DEBUG_CLASS_BITMAPS)
1182 printf ("class_id_mapping_element_build_layout_bitmap: building layout for class %s.%s: ", mono_class_get_namespace (klass), mono_class_get_name (klass));
1183 #endif
1185 if (parent_class != NULL) {
1186 parent_id = class_id_mapping_element_get (parent_class);
1187 g_assert (parent_id != NULL);
1189 if (parent_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1190 #if (DEBUG_CLASS_BITMAPS)
1191 printf ("[recursively building bitmap for father class]\n");
1192 #endif
1193 class_id_mapping_element_build_layout_bitmap (parent_class, parent_id);
1195 } else {
1196 parent_id = NULL;
1199 iter = NULL;
1200 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1201 MonoType* field_type = mono_field_get_type (field);
1202 // For now, skip static fields
1203 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1204 continue;
1206 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1207 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1208 if (field_offset > max_offset_of_reference_fields) {
1209 max_offset_of_reference_fields = field_offset;
1211 number_of_reference_fields ++;
1212 } else {
1213 MonoClass *field_class = mono_class_from_mono_type (field_type);
1214 if (field_class && mono_class_is_valuetype (field_class)) {
1215 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1216 g_assert (field_id != NULL);
1218 if (field_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
1219 if (field_id != klass_id) {
1220 #if (DEBUG_CLASS_BITMAPS)
1221 printf ("[recursively building bitmap for field %s]\n", mono_field_get_name (field));
1222 #endif
1223 class_id_mapping_element_build_layout_bitmap (field_class, field_id);
1224 } else {
1225 #if (DEBUG_CLASS_BITMAPS)
1226 printf ("[breaking recursive bitmap build for field %s]", mono_field_get_name (field));
1228 #endif
1229 klass_id->data.bitmap.compact = 0;
1230 klass_id->data.layout.slots = 0;
1231 klass_id->data.layout.references = 0;
1235 if (field_id->data.layout.references > 0) {
1236 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1237 int max_offset_reference_in_field = (field_id->data.layout.slots - 1) * sizeof (gpointer);
1239 if ((field_offset + max_offset_reference_in_field) > max_offset_of_reference_fields) {
1240 max_offset_of_reference_fields = field_offset + max_offset_reference_in_field;
1243 number_of_reference_fields += field_id->data.layout.references;
1249 #if (DEBUG_CLASS_BITMAPS)
1250 printf ("[allocating bitmap for class %s.%s (references %d, max offset %d, slots %d)]", mono_class_get_namespace (klass), mono_class_get_name (klass), number_of_reference_fields, max_offset_of_reference_fields, (int)(max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1251 #endif
1252 if ((number_of_reference_fields == 0) && ((parent_id == NULL) || (parent_id->data.layout.references == 0))) {
1253 #if (DEBUG_CLASS_BITMAPS)
1254 printf ("[no references at all]");
1255 #endif
1256 klass_id->data.bitmap.compact = 0;
1257 klass_id->data.layout.slots = 0;
1258 klass_id->data.layout.references = 0;
1259 } else {
1260 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1261 #if (DEBUG_CLASS_BITMAPS)
1262 printf ("[parent %s.%s has %d references in %d slots]", mono_class_get_namespace (parent_class), mono_class_get_name (parent_class), parent_id->data.layout.references, parent_id->data.layout.slots);
1263 #endif
1264 klass_id->data.layout.slots = parent_id->data.layout.slots;
1265 klass_id->data.layout.references = parent_id->data.layout.references;
1266 } else {
1267 #if (DEBUG_CLASS_BITMAPS)
1268 printf ("[no references from parent]");
1269 #endif
1270 klass_id->data.layout.slots = 0;
1271 klass_id->data.layout.references = 0;
1274 if (number_of_reference_fields > 0) {
1275 klass_id->data.layout.slots += ((max_offset_of_reference_fields / sizeof (gpointer)) + 1);
1276 klass_id->data.layout.references += number_of_reference_fields;
1277 #if (DEBUG_CLASS_BITMAPS)
1278 printf ("[adding data, going to %d references in %d slots]", klass_id->data.layout.references, klass_id->data.layout.slots);
1279 #endif
1282 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1283 #if (DEBUG_CLASS_BITMAPS)
1284 printf ("[zeroing bitmap]");
1285 #endif
1286 klass_id->data.bitmap.compact = 0;
1287 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1288 #if (DEBUG_CLASS_BITMAPS)
1289 printf ("[copying compact father bitmap]");
1290 #endif
1291 klass_id->data.bitmap.compact = parent_id->data.bitmap.compact;
1293 } else {
1294 int size_of_bitmap = klass_id->data.layout.slots;
1295 BITS_TO_BYTES (size_of_bitmap);
1296 #if (DEBUG_CLASS_BITMAPS)
1297 printf ("[allocating %d bytes for bitmap]", size_of_bitmap);
1298 #endif
1299 klass_id->data.bitmap.extended = g_malloc0 (size_of_bitmap);
1300 if ((parent_id != NULL) && (parent_id->data.layout.references > 0)) {
1301 int size_of_father_bitmap = parent_id->data.layout.slots;
1302 if (size_of_father_bitmap <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1303 int father_slot;
1304 #if (DEBUG_CLASS_BITMAPS)
1305 printf ("[copying %d bits from father bitmap]", size_of_father_bitmap);
1306 #endif
1307 for (father_slot = 0; father_slot < size_of_father_bitmap; father_slot ++) {
1308 if (parent_id->data.bitmap.compact & (((guint64)1) << father_slot)) {
1309 klass_id->data.bitmap.extended [father_slot >> 3] |= (1 << (father_slot & 7));
1312 } else {
1313 BITS_TO_BYTES (size_of_father_bitmap);
1314 #if (DEBUG_CLASS_BITMAPS)
1315 printf ("[copying %d bytes from father bitmap]", size_of_father_bitmap);
1316 #endif
1317 memcpy (klass_id->data.bitmap.extended, parent_id->data.bitmap.extended, size_of_father_bitmap);
1323 #if (DEBUG_CLASS_BITMAPS)
1324 printf ("[starting filling iteration]\n");
1325 #endif
1326 iter = NULL;
1327 while ((field = mono_class_get_fields (klass, &iter)) != NULL) {
1328 MonoType* field_type = mono_field_get_type (field);
1329 // For now, skip static fields
1330 if (mono_field_get_flags (field) & 0x0010 /*FIELD_ATTRIBUTE_STATIC*/)
1331 continue;
1333 #if (DEBUG_CLASS_BITMAPS)
1334 printf ("[Working on field %s]", mono_field_get_name (field));
1335 #endif
1336 if (MONO_TYPE_IS_REFERENCE (field_type)) {
1337 int field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1338 int field_slot;
1339 g_assert ((field_offset % sizeof (gpointer)) == 0);
1340 field_slot = field_offset / sizeof (gpointer);
1341 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1342 klass_id->data.bitmap.compact |= (((guint64)1) << field_slot);
1343 } else {
1344 klass_id->data.bitmap.extended [field_slot >> 3] |= (1 << (field_slot & 7));
1346 #if (DEBUG_CLASS_BITMAPS)
1347 printf ("[reference at offset %d, slot %d]", field_offset, field_slot);
1348 #endif
1349 } else {
1350 MonoClass *field_class = mono_class_from_mono_type (field_type);
1351 if (field_class && mono_class_is_valuetype (field_class)) {
1352 ClassIdMappingElement *field_id = class_id_mapping_element_get (field_class);
1353 int field_offset;
1354 int field_slot;
1356 g_assert (field_id != NULL);
1357 field_offset = mono_field_get_offset (field) - sizeof (MonoObject);
1358 g_assert ((field_id->data.layout.references == 0) || ((field_offset % sizeof (gpointer)) == 0));
1359 field_slot = field_offset / sizeof (gpointer);
1360 #if (DEBUG_CLASS_BITMAPS)
1361 printf ("[value type at offset %d, slot %d, with %d references in %d slots]", field_offset, field_slot, field_id->data.layout.references, field_id->data.layout.slots);
1362 #endif
1364 if (field_id->data.layout.references > 0) {
1365 int sub_field_slot;
1366 if (field_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1367 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1368 if (field_id->data.bitmap.compact & (((guint64)1) << sub_field_slot)) {
1369 int actual_slot = field_slot + sub_field_slot;
1370 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1371 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1372 } else {
1373 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1377 } else {
1378 for (sub_field_slot = 0; sub_field_slot < field_id->data.layout.slots; sub_field_slot ++) {
1379 if (field_id->data.bitmap.extended [sub_field_slot >> 3] & (1 << (sub_field_slot & 7))) {
1380 int actual_slot = field_slot + sub_field_slot;
1381 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1382 klass_id->data.bitmap.compact |= (((guint64)1) << actual_slot);
1383 } else {
1384 klass_id->data.bitmap.extended [actual_slot >> 3] |= (1 << (actual_slot & 7));
1393 #if (DEBUG_CLASS_BITMAPS)
1394 do {
1395 int slot;
1396 printf ("\nLayot of class \"%s.%s\": references %d, slots %d, bitmap {", mono_class_get_namespace (klass), mono_class_get_name (klass), klass_id->data.layout.references, klass_id->data.layout.slots);
1397 for (slot = 0; slot < klass_id->data.layout.slots; slot ++) {
1398 if (klass_id->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
1399 if (klass_id->data.bitmap.compact & (((guint64)1) << slot)) {
1400 printf (" 1");
1401 } else {
1402 printf (" 0");
1404 } else {
1405 if (klass_id->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
1406 printf (" 1");
1407 } else {
1408 printf (" 0");
1413 printf (" }\n");
1415 } while (0);
1416 #endif
1419 static MethodIdMappingElement*
1420 method_id_mapping_element_new (MonoMethod *method) {
1421 MethodIdMappingElement *result = g_new (MethodIdMappingElement, 1);
1422 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
1424 result->name = g_strdup_printf ("%s (%s)", mono_method_get_name (method), signature);
1425 g_free (signature);
1426 result->method = method;
1427 result->next_unwritten = profiler->methods->unwritten;
1428 profiler->methods->unwritten = result;
1429 result->id = profiler->methods->next_id;
1430 profiler->methods->next_id ++;
1431 g_hash_table_insert (profiler->methods->table, method, result);
1433 result->data.code_start = NULL;
1434 result->data.code_size = 0;
1436 #if (DEBUG_MAPPING_EVENTS)
1437 printf ("Created new METHOD mapping element \"%s\" (%p)[%d]\n", result->name, method, result->id);
1438 #endif
1439 return result;
1443 static void
1444 method_id_mapping_element_destroy (gpointer element) {
1445 MethodIdMappingElement *e = (MethodIdMappingElement*) element;
1446 if (e->name)
1447 g_free (e->name);
1448 g_free (element);
1451 static void
1452 class_id_mapping_element_destroy (gpointer element) {
1453 ClassIdMappingElement *e = (ClassIdMappingElement*) element;
1454 if (e->name)
1455 g_free (e->name);
1456 if ((e->data.layout.slots != CLASS_LAYOUT_NOT_INITIALIZED) && (e->data.layout.slots > CLASS_LAYOUT_PACKED_BITMAP_SIZE))
1457 g_free (e->data.bitmap.extended);
1458 g_free (element);
1461 static MethodIdMapping*
1462 method_id_mapping_new (void) {
1463 MethodIdMapping *result = g_new (MethodIdMapping, 1);
1464 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, method_id_mapping_element_destroy);
1465 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, method_id_mapping_element_destroy);
1466 result->unwritten = NULL;
1467 result->next_id = 1;
1468 return result;
1471 static ClassIdMapping*
1472 class_id_mapping_new (void) {
1473 ClassIdMapping *result = g_new (ClassIdMapping, 1);
1474 //result->table = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, class_id_mapping_element_destroy);
1475 result->table = g_hash_table_new_full (g_direct_hash, NULL, NULL, class_id_mapping_element_destroy);
1476 result->unwritten = NULL;
1477 result->next_id = 1;
1478 return result;
1481 static void
1482 method_id_mapping_destroy (MethodIdMapping *map) {
1483 g_hash_table_destroy (map->table);
1484 g_free (map);
1487 static void
1488 class_id_mapping_destroy (ClassIdMapping *map) {
1489 g_hash_table_destroy (map->table);
1490 g_free (map);
1493 #if (DEBUG_LOAD_EVENTS)
1494 static void
1495 print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element);
1496 #endif
1498 static LoadedElement*
1499 loaded_element_load_start (GHashTable *table, gpointer item) {
1500 LoadedElement *element = g_new0 (LoadedElement, 1);
1501 element->id = profiler->loaded_element_next_free_id;
1502 profiler->loaded_element_next_free_id ++;
1503 #if (DEBUG_LOAD_EVENTS)
1504 print_load_event ("LOAD START", table, item, element);
1505 #endif
1506 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_start_counter);
1507 g_hash_table_insert (table, item, element);
1508 return element;
1511 static LoadedElement*
1512 loaded_element_load_end (GHashTable *table, gpointer item, char *name) {
1513 LoadedElement *element = g_hash_table_lookup (table, item);
1514 #if (DEBUG_LOAD_EVENTS)
1515 print_load_event ("LOAD END", table, item, element);
1516 #endif
1517 g_assert (element != NULL);
1518 MONO_PROFILER_GET_CURRENT_COUNTER (element->load_end_counter);
1519 element->name = name;
1520 element->loaded = TRUE;
1521 return element;
1524 static LoadedElement*
1525 loaded_element_unload_start (GHashTable *table, gpointer item) {
1526 LoadedElement *element = g_hash_table_lookup (table, item);
1527 #if (DEBUG_LOAD_EVENTS)
1528 print_load_event ("UNLOAD START", table, item, element);
1529 #endif
1530 g_assert (element != NULL);
1531 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_start_counter);
1532 return element;
1535 static LoadedElement*
1536 loaded_element_unload_end (GHashTable *table, gpointer item) {
1537 LoadedElement *element = g_hash_table_lookup (table, item);
1538 #if (DEBUG_LOAD_EVENTS)
1539 print_load_event ("UNLOAD END", table, item, element);
1540 #endif
1541 g_assert (element != NULL);
1542 MONO_PROFILER_GET_CURRENT_COUNTER (element->unload_end_counter);
1543 element->unloaded = TRUE;
1544 return element;
1547 static LoadedElement*
1548 loaded_element_find (GHashTable *table, gpointer item) {
1549 LoadedElement *element = g_hash_table_lookup (table, item);
1550 return element;
1553 static guint32
1554 loaded_element_get_id (GHashTable *table, gpointer item) {
1555 LoadedElement *element = loaded_element_find (table, item);
1556 if (element != NULL) {
1557 return element->id;
1558 } else {
1559 return 0;
1563 static void
1564 loaded_element_destroy (gpointer element) {
1565 if (((LoadedElement*)element)->name)
1566 g_free (((LoadedElement*)element)->name);
1567 g_free (element);
1568 }
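/*
 * Illustrative sketch, not part of the original file: the expected lifecycle of
 * a LoadedElement for an assembly, as driven by the runtime's load callbacks.
 * The helper name example_track_assembly is hypothetical; "name" is handed
 * over to the element and freed later by loaded_element_destroy.
 */
static inline void
example_track_assembly (gpointer assembly, char *name) {
	loaded_element_load_start (profiler->loaded_assemblies, assembly);
	/* ... the runtime finishes loading the assembly ... */
	loaded_element_load_end (profiler->loaded_assemblies, assembly, name);
}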
1570 #if (DEBUG_LOAD_EVENTS)
1571 static void
1572 print_load_event (const char *event_name, GHashTable *table, gpointer item, LoadedElement *element) {
1573 const char* item_name;
1574 char* item_info;
1576 if (table == profiler->loaded_assemblies) {
1577 //item_info = g_strdup_printf("ASSEMBLY %p (dynamic %d)", item, mono_image_is_dynamic (mono_assembly_get_image((MonoAssembly*)item)));
1578 item_info = g_strdup_printf("ASSEMBLY %p", item);
1579 } else if (table == profiler->loaded_modules) {
1580 //item_info = g_strdup_printf("MODULE %p (dynamic %d)", item, mono_image_is_dynamic ((MonoImage*)item));
1581 item_info = g_strdup_printf("MODULE %p", item);
1582 } else if (table == profiler->loaded_appdomains) {
1583 item_info = g_strdup_printf("APPDOMAIN %p (id %d)", item, mono_domain_get_id ((MonoDomain*)item));
1584 } else {
1585 item_info = NULL;
1586 g_assert_not_reached ();
1589 if (element != NULL) {
1590 item_name = element->name;
1591 } else {
1592 item_name = "<NULL>";
1595 printf ("%s EVENT for %s (%s [id %d])\n", event_name, item_info, item_name, element->id);
1596 g_free (item_info);
1598 #endif
1600 static void
1601 profiler_heap_shot_object_buffers_destroy (ProfilerHeapShotObjectBuffer *buffer) {
1602 while (buffer != NULL) {
1603 ProfilerHeapShotObjectBuffer *next = buffer->next;
1604 #if DEBUG_HEAP_PROFILER
1605 printf ("profiler_heap_shot_object_buffers_destroy: destroyed buffer %p (%p-%p)\n", buffer, & (buffer->buffer [0]), buffer->end);
1606 #endif
1607 g_free (buffer);
1608 buffer = next;
1612 static ProfilerHeapShotObjectBuffer*
1613 profiler_heap_shot_object_buffer_new (ProfilerPerThreadData *data) {
1614 ProfilerHeapShotObjectBuffer *buffer;
1615 ProfilerHeapShotObjectBuffer *result = g_new (ProfilerHeapShotObjectBuffer, 1);
1616 result->next_free_slot = & (result->buffer [0]);
1617 result->end = & (result->buffer [PROFILER_HEAP_SHOT_OBJECT_BUFFER_SIZE]);
1618 result->first_unprocessed_slot = & (result->buffer [0]);
1619 result->next = data->heap_shot_object_buffers;
1620 data->heap_shot_object_buffers = result;
1621 #if DEBUG_HEAP_PROFILER
1622 printf ("profiler_heap_shot_object_buffer_new: created buffer %p (%p-%p)\n", result, result->next_free_slot, result->end);
1623 #endif
1624 for (buffer = result; buffer != NULL; buffer = buffer->next) {
1625 ProfilerHeapShotObjectBuffer *last = buffer->next;
1626 if ((last != NULL) && (last->first_unprocessed_slot == last->end)) {
1627 buffer->next = NULL;
1628 profiler_heap_shot_object_buffers_destroy (last);
1632 return result;
1635 static ProfilerHeapShotWriteJob*
1636 profiler_heap_shot_write_job_new (gboolean heap_shot_was_requested, gboolean dump_heap_data, guint32 collection) {
1637 ProfilerHeapShotWriteJob *job = g_new (ProfilerHeapShotWriteJob, 1);
1638 job->next = NULL;
1639 job->next_unwritten = NULL;
1641 if (profiler->action_flags.unreachable_objects || dump_heap_data) {
1642 job->buffers = g_new (ProfilerHeapShotWriteBuffer, 1);
1643 job->buffers->next = NULL;
1644 job->last_next = & (job->buffers->next);
1645 job->start = & (job->buffers->buffer [0]);
1646 job->cursor = job->start;
1647 job->end = & (job->buffers->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1648 } else {
1649 job->buffers = NULL;
1650 job->last_next = NULL;
1651 job->start = NULL;
1652 job->cursor = NULL;
1653 job->end = NULL;
1655 job->full_buffers = 0;
1657 if (profiler->action_flags.collection_summary) {
1658 job->summary.capacity = profiler->classes->next_id;
1659 job->summary.per_class_data = g_new0 (ProfilerHeapShotClassSummary, job->summary.capacity);
1660 } else {
1661 job->summary.capacity = 0;
1662 job->summary.per_class_data = NULL;
1665 job->heap_shot_was_requested = heap_shot_was_requested;
1666 job->collection = collection;
1667 job->dump_heap_data = dump_heap_data;
1668 #if DEBUG_HEAP_PROFILER
1669 printf ("profiler_heap_shot_write_job_new: created job %p with buffer %p(%p-%p) (collection %d, dump %d)\n", job, job->buffers, job->start, job->end, collection, dump_heap_data);
1670 #endif
1671 return job;
1674 static gboolean
1675 profiler_heap_shot_write_job_has_data (ProfilerHeapShotWriteJob *job) {
1676 return ((job->buffers != NULL) || (job->summary.capacity > 0));
1679 static void
1680 profiler_heap_shot_write_job_add_buffer (ProfilerHeapShotWriteJob *job, gpointer value) {
1681 ProfilerHeapShotWriteBuffer *buffer = g_new (ProfilerHeapShotWriteBuffer, 1);
1682 buffer->next = NULL;
1683 *(job->last_next) = buffer;
1684 job->last_next = & (buffer->next);
1685 job->full_buffers ++;
1686 buffer->buffer [0] = value;
1687 job->start = & (buffer->buffer [0]);
1688 job->cursor = & (buffer->buffer [1]);
1689 job->end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
1690 #if DEBUG_HEAP_PROFILER
1691 printf ("profiler_heap_shot_write_job_add_buffer: in job %p, added buffer %p(%p-%p) with value %p at address %p (cursor now %p)\n", job, buffer, job->start, job->end, value, &(buffer->buffer [0]), job->cursor);
1692 do {
1693 ProfilerHeapShotWriteBuffer *current_buffer;
1694 for (current_buffer = job->buffers; current_buffer != NULL; current_buffer = current_buffer->next) {
1695 printf ("profiler_heap_shot_write_job_add_buffer: now job %p has buffer %p\n", job, current_buffer);
1697 } while (0);
1698 #endif
1701 static void
1702 profiler_heap_shot_write_job_free_buffers (ProfilerHeapShotWriteJob *job) {
1703 ProfilerHeapShotWriteBuffer *buffer = job->buffers;
1705 while (buffer != NULL) {
1706 ProfilerHeapShotWriteBuffer *next = buffer->next;
1707 #if DEBUG_HEAP_PROFILER
1708 printf ("profiler_heap_shot_write_job_free_buffers: in job %p, freeing buffer %p\n", job, buffer);
1709 #endif
1710 g_free (buffer);
1711 buffer = next;
1714 job->buffers = NULL;
1716 if (job->summary.per_class_data != NULL) {
1717 g_free (job->summary.per_class_data);
1718 job->summary.per_class_data = NULL;
1720 job->summary.capacity = 0;
1723 static void
1724 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job);
1726 static void
1727 profiler_process_heap_shot_write_jobs (void) {
1728 gboolean done = FALSE;
1730 while (!done) {
1731 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1732 ProfilerHeapShotWriteJob *previous_job = NULL;
1733 ProfilerHeapShotWriteJob *next_job;
1735 done = TRUE;
1736 while (current_job != NULL) {
1737 next_job = current_job->next_unwritten;
1739 if (next_job != NULL) {
1740 if (profiler_heap_shot_write_job_has_data (current_job)) {
1741 done = FALSE;
1743 if (! profiler_heap_shot_write_job_has_data (next_job)) {
1744 current_job->next_unwritten = NULL;
1745 next_job = NULL;
1747 } else {
1748 if (profiler_heap_shot_write_job_has_data (current_job)) {
1749 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: writing...");
1750 profiler_heap_shot_write_block (current_job);
1751 LOG_WRITER_THREAD ("profiler_process_heap_shot_write_jobs: done");
1752 if (previous_job != NULL) {
1753 previous_job->next_unwritten = NULL;
1758 previous_job = current_job;
1759 current_job = next_job;
1764 static void
1765 profiler_free_heap_shot_write_jobs (void) {
1766 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1767 ProfilerHeapShotWriteJob *next_job;
1769 if (current_job != NULL) {
1770 while (current_job->next_unwritten != NULL) {
1771 #if DEBUG_HEAP_PROFILER
1772 printf ("profiler_free_heap_shot_write_jobs: job %p must not be freed\n", current_job);
1773 #endif
1774 current_job = current_job->next_unwritten;
1777 next_job = current_job->next;
1778 current_job->next = NULL;
1779 current_job = next_job;
1781 while (current_job != NULL) {
1782 #if DEBUG_HEAP_PROFILER
1783 printf ("profiler_free_heap_shot_write_jobs: job %p will be freed\n", current_job);
1784 #endif
1785 next_job = current_job->next;
1786 profiler_heap_shot_write_job_free_buffers (current_job);
1787 g_free (current_job);
1788 current_job = next_job;
1793 static void
1794 profiler_destroy_heap_shot_write_jobs (void) {
1795 ProfilerHeapShotWriteJob *current_job = profiler->heap_shot_write_jobs;
1796 ProfilerHeapShotWriteJob *next_job;
1798 while (current_job != NULL) {
1799 next_job = current_job->next;
1800 profiler_heap_shot_write_job_free_buffers (current_job);
1801 g_free (current_job);
1802 current_job = next_job;
1806 static void
1807 profiler_add_heap_shot_write_job (ProfilerHeapShotWriteJob *job) {
1808 job->next = profiler->heap_shot_write_jobs;
1809 job->next_unwritten = job->next;
1810 profiler->heap_shot_write_jobs = job;
1811 #if DEBUG_HEAP_PROFILER
1812 printf ("profiler_add_heap_shot_write_job: added job %p\n", job);
1813 #endif
1816 #if DEBUG_HEAP_PROFILER
1817 #define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p\n", (d)->thread_id, (o), (d)->heap_shot_object_buffers->next_free_slot)
1818 #define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o) printf ("STORE_ALLOCATED_OBJECT[TID %ld]: storing object %p at address %p in new buffer %p\n", (d)->thread_id, (o), buffer->next_free_slot, buffer)
1819 #else
1820 #define STORE_ALLOCATED_OBJECT_MESSAGE1(d,o)
1821 #define STORE_ALLOCATED_OBJECT_MESSAGE2(d,o)
1822 #endif
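/* Appends object "o" to the per-thread heap shot object buffer of "d", chaining in a new */
/* buffer via profiler_heap_shot_object_buffer_new when the current one is full. */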
1823 #define STORE_ALLOCATED_OBJECT(d,o) do {\
1824 if ((d)->heap_shot_object_buffers->next_free_slot < (d)->heap_shot_object_buffers->end) {\
1825 STORE_ALLOCATED_OBJECT_MESSAGE1 ((d), (o));\
1826 *((d)->heap_shot_object_buffers->next_free_slot) = (o);\
1827 (d)->heap_shot_object_buffers->next_free_slot ++;\
1828 } else {\
1829 ProfilerHeapShotObjectBuffer *buffer = profiler_heap_shot_object_buffer_new (d);\
1830 STORE_ALLOCATED_OBJECT_MESSAGE2 ((d), (o));\
1831 *((buffer)->next_free_slot) = (o);\
1832 (buffer)->next_free_slot ++;\
1834 } while (0)
1836 static ProfilerPerThreadData*
1837 profiler_per_thread_data_new (guint32 buffer_size)
1839 ProfilerPerThreadData *data = g_new (ProfilerPerThreadData, 1);
1841 data->events = g_new0 (ProfilerEventData, buffer_size);
1842 data->next_free_event = data->events;
1843 data->next_unreserved_event = data->events;
1844 data->end_event = data->events + (buffer_size - 1);
1845 data->first_unwritten_event = data->events;
1846 data->first_unmapped_event = data->events;
1847 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
1848 data->last_event_counter = data->start_event_counter;
1849 data->thread_id = CURRENT_THREAD_ID ();
1850 data->heap_shot_object_buffers = NULL;
1851 if ((profiler->action_flags.unreachable_objects == TRUE) ||
1852 (profiler->action_flags.heap_shot == TRUE) ||
1853 (profiler->action_flags.collection_summary == TRUE)) {
1854 profiler_heap_shot_object_buffer_new (data);
1856 if (profiler->action_flags.track_stack) {
1857 thread_stack_initialize (&(data->stack), 64);
1858 } else {
1859 thread_stack_initialize_empty (&(data->stack));
1861 return data;
1864 static void
1865 profiler_per_thread_data_destroy (ProfilerPerThreadData *data) {
1866 g_free (data->events);
1867 profiler_heap_shot_object_buffers_destroy (data->heap_shot_object_buffers);
1868 thread_stack_free (&(data->stack));
1869 g_free (data);
1872 static ProfilerStatisticalData*
1873 profiler_statistical_data_new (MonoProfiler *profiler) {
1874 int buffer_size = profiler->statistical_buffer_size * (profiler->statistical_call_chain_depth + 1);
1875 ProfilerStatisticalData *data = g_new (ProfilerStatisticalData, 1);
1877 data->hits = g_new0 (ProfilerStatisticalHit, buffer_size);
1878 data->next_free_index = 0;
1879 data->end_index = profiler->statistical_buffer_size;
1880 data->first_unwritten_index = 0;
1882 return data;
1885 static void
1886 profiler_statistical_data_destroy (ProfilerStatisticalData *data) {
1887 g_free (data->hits);
1888 g_free (data);
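/* Code buffers are kept in a tree of ProfilerCodeBufferArray nodes: level 0 nodes hold up */
/* to PROFILER_CODE_BUFFER_ARRAY_SIZE actual buffers, higher levels hold sub-arrays, and */
/* every slot caches the start/end addresses of the range it covers, so lookups can binary */
/* search at each level (see profiler_code_buffer_find below). */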
1891 static ProfilerCodeBufferArray*
1892 profiler_code_buffer_array_new (ProfilerCodeBufferArray *child) {
1893 ProfilerCodeBufferArray *result = g_new0 (ProfilerCodeBufferArray, 1);
1894 if (child == NULL) {
1895 result->level = 0;
1896 } else {
1897 result->level = child->level + 1;
1898 result->number_of_buffers = 1;
1899 result->buffers [0].info.data.sub_buffers = child;
1900 result->buffers [0].start = child->buffers [0].start;
1901 result->buffers [0].end = child->buffers [child->number_of_buffers - 1].end;
1903 return result;
1906 static void
1907 profiler_code_buffer_array_destroy (ProfilerCodeBufferArray *buffers) {
1908 if (buffers->level > 0) {
1909 int i;
1910 for (i = 0; i < buffers->number_of_buffers; i++) {
1911 ProfilerCodeBufferArray *sub_buffers = buffers->buffers [i].info.data.sub_buffers;
1912 profiler_code_buffer_array_destroy (sub_buffers);
1915 g_free (buffers);
1918 static gboolean
1919 profiler_code_buffer_array_is_full (ProfilerCodeBufferArray *buffers) {
1920 while (buffers->level > 0) {
1921 ProfilerCodeBufferArray *next;
1922 if (buffers->number_of_buffers < PROFILER_CODE_BUFFER_ARRAY_SIZE) {
1923 return FALSE;
1925 next = buffers->buffers [PROFILER_CODE_BUFFER_ARRAY_SIZE - 1].info.data.sub_buffers;
1926 if (next->level < (buffers->level - 1)) {
1927 return FALSE;
1929 buffers = next;
1931 return (buffers->number_of_buffers == PROFILER_CODE_BUFFER_ARRAY_SIZE);
1934 static ProfilerCodeBufferArray*
1935 profiler_code_buffer_add (ProfilerCodeBufferArray *buffers, gpointer *buffer, int size, MonoProfilerCodeBufferType type, void *data) {
1936 if (buffers == NULL) {
1937 buffers = profiler_code_buffer_array_new (NULL);
1940 if (profiler_code_buffer_array_is_full (buffers)) {
1941 ProfilerCodeBufferArray *new_slot = profiler_code_buffer_add (NULL, buffer, size, type, data);
1942 buffers = profiler_code_buffer_array_new (buffers);
1943 buffers->buffers [buffers->number_of_buffers].info.data.sub_buffers = new_slot;
1944 buffers->buffers [buffers->number_of_buffers].start = new_slot->buffers [0].start;
1945 buffers->buffers [buffers->number_of_buffers].end = new_slot->buffers [new_slot->number_of_buffers - 1].end;
1946 buffers->number_of_buffers ++;
1947 } else if (buffers->level > 0) {
1948 ProfilerCodeBufferArray *new_slot = profiler_code_buffer_add (buffers->buffers [buffers->number_of_buffers - 1].info.data.sub_buffers, buffer, size, type, data);
1949 buffers->buffers [buffers->number_of_buffers - 1].info.data.sub_buffers = new_slot;
1950 buffers->buffers [buffers->number_of_buffers - 1].start = new_slot->buffers [0].start;
1951 buffers->buffers [buffers->number_of_buffers - 1].end = new_slot->buffers [new_slot->number_of_buffers - 1].end;
1952 } else {
1953 buffers->buffers [buffers->number_of_buffers].start = buffer;
1954 buffers->buffers [buffers->number_of_buffers].end = (((guint8*) buffer) + size);
1955 buffers->buffers [buffers->number_of_buffers].info.type = type;
1956 switch (type) {
1957 case MONO_PROFILER_CODE_BUFFER_UNKNOWN:
1958 buffers->buffers [buffers->number_of_buffers].info.data.data = NULL;
1959 break;
1960 case MONO_PROFILER_CODE_BUFFER_METHOD:
1961 buffers->buffers [buffers->number_of_buffers].info.data.method = data;
1962 break;
1963 default:
1964 buffers->buffers [buffers->number_of_buffers].info.type = MONO_PROFILER_CODE_BUFFER_UNKNOWN;
1965 buffers->buffers [buffers->number_of_buffers].info.data.data = NULL;
1967 buffers->number_of_buffers ++;
1969 return buffers;
1972 static ProfilerCodeBuffer*
1973 profiler_code_buffer_find (ProfilerCodeBufferArray *buffers, gpointer *address) {
1974 if (buffers != NULL) {
1975 ProfilerCodeBuffer *result = NULL;
1976 do {
1977 int low = 0;
1978 int high = buffers->number_of_buffers - 1;
1980 while (high != low) {
1981 int middle = low + ((high - low) >> 1);
1983 if ((guint8*) address < (guint8*) buffers->buffers [low].start) {
1984 return NULL;
1986 if ((guint8*) address >= (guint8*) buffers->buffers [high].end) {
1987 return NULL;
1990 if ((guint8*) address < (guint8*) buffers->buffers [middle].start) {
1991 high = middle - 1;
1992 if (high < low) {
1993 high = low;
1995 } else if ((guint8*) address >= (guint8*) buffers->buffers [middle].end) {
1996 low = middle + 1;
1997 if (low > high) {
1998 low = high;
2000 } else {
2001 high = middle;
2002 low = middle;
2006 if (((guint8*) address >= (guint8*) buffers->buffers [low].start) && ((guint8*) address < (guint8*) buffers->buffers [low].end)) {
2007 if (buffers->level == 0) {
2008 result = & (buffers->buffers [low]);
2009 } else {
2010 buffers = buffers->buffers [low].info.data.sub_buffers;
2012 } else {
2013 return NULL;
2015 } while (result == NULL);
2016 return result;
2017 } else {
2018 return NULL;
2022 static void
2023 profiler_code_chunk_initialize (ProfilerCodeChunk *chunk, gpointer memory, gsize size) {
2024 chunk->buffers = profiler_code_buffer_array_new (NULL);
2025 chunk->destroyed = FALSE;
2026 chunk->start = memory;
2027 chunk->end = ((guint8*)memory) + size;
2030 static void
2031 profiler_code_chunk_cleanup (ProfilerCodeChunk *chunk) {
2032 if (chunk->buffers != NULL) {
2033 profiler_code_buffer_array_destroy (chunk->buffers);
2034 chunk->buffers = NULL;
2036 chunk->start = NULL;
2037 chunk->end = NULL;
2040 static void
2041 profiler_code_chunks_initialize (ProfilerCodeChunks *chunks) {
2042 chunks->capacity = 32;
2043 chunks->chunks = g_new0 (ProfilerCodeChunk, 32);
2044 chunks->number_of_chunks = 0;
2047 static void
2048 profiler_code_chunks_cleanup (ProfilerCodeChunks *chunks) {
2049 int i;
2050 for (i = 0; i < chunks->number_of_chunks; i++) {
2051 profiler_code_chunk_cleanup (& (chunks->chunks [i]));
2053 chunks->capacity = 0;
2054 chunks->number_of_chunks = 0;
2055 g_free (chunks->chunks);
2056 chunks->chunks = NULL;
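/* The chunk array is kept sorted by address (profiler_code_chunks_sort) so that */
/* profiler_code_chunk_find can use bsearch; compare_address_and_code_chunk reports */
/* "equal" whenever the address falls inside a chunk, which makes bsearch return the */
/* chunk containing the address. */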
2059 static int
2060 compare_code_chunks (const void* c1, const void* c2) {
2061 ProfilerCodeChunk *chunk1 = (ProfilerCodeChunk*) c1;
2062 ProfilerCodeChunk *chunk2 = (ProfilerCodeChunk*) c2;
2063 return ((guint8*) chunk1->end < (guint8*) chunk2->start) ? -1 : (((guint8*) chunk1->start >= (guint8*) chunk2->end) ? 1 : 0);
2066 static int
2067 compare_address_and_code_chunk (const void* a, const void* c) {
2068 gpointer address = (gpointer) a;
2069 ProfilerCodeChunk *chunk = (ProfilerCodeChunk*) c;
2070 return ((guint8*) address < (guint8*) chunk->start) ? -1 : (((guint8*) address >= (guint8*) chunk->end) ? 1 : 0);
2073 static void
2074 profiler_code_chunks_sort (ProfilerCodeChunks *chunks) {
2075 qsort (chunks->chunks, chunks->number_of_chunks, sizeof (ProfilerCodeChunk), compare_code_chunks);
2078 static ProfilerCodeChunk*
2079 profiler_code_chunk_find (ProfilerCodeChunks *chunks, gpointer address) {
2080 return bsearch (address, chunks->chunks, chunks->number_of_chunks, sizeof (ProfilerCodeChunk), compare_address_and_code_chunk);
2083 static ProfilerCodeChunk*
2084 profiler_code_chunk_new (ProfilerCodeChunks *chunks, gpointer memory, gsize size) {
2085 ProfilerCodeChunk *result;
2087 if (chunks->number_of_chunks == chunks->capacity) {
2088 ProfilerCodeChunk *new_chunks = g_new0 (ProfilerCodeChunk, chunks->capacity * 2);
2089 memcpy (new_chunks, chunks->chunks, chunks->capacity * sizeof (ProfilerCodeChunk));
2090 chunks->capacity *= 2;
2091 g_free (chunks->chunks);
2092 chunks->chunks = new_chunks;
2095 result = & (chunks->chunks [chunks->number_of_chunks]);
2096 chunks->number_of_chunks ++;
2097 profiler_code_chunk_initialize (result, memory, size);
2098 profiler_code_chunks_sort (chunks);
2099 return result;
2102 static int
2103 profiler_code_chunk_to_index (ProfilerCodeChunks *chunks, ProfilerCodeChunk *chunk) {
2104 return (int) (chunk - chunks->chunks);
2107 static void
2108 profiler_code_chunk_remove (ProfilerCodeChunks *chunks, ProfilerCodeChunk *chunk) {
2109 int index = profiler_code_chunk_to_index (chunks, chunk);
2111 profiler_code_chunk_cleanup (chunk);
2112 if ((index >= 0) && (index < chunks->number_of_chunks)) {
2113 memmove (chunk, chunk + 1, (chunks->number_of_chunks - index) * sizeof (ProfilerCodeChunk));
2117 /* This assumes the profiler lock is held */
2118 static ProfilerCodeBuffer*
2119 profiler_code_buffer_from_address (MonoProfiler *prof, gpointer address) {
2120 ProfilerCodeChunks *chunks = & (prof->code_chunks);
2122 ProfilerCodeChunk *chunk = profiler_code_chunk_find (chunks, address);
2123 if (chunk != NULL) {
2124 return profiler_code_buffer_find (chunk->buffers, address);
2125 } else {
2126 return NULL;
2130 static void
2131 profiler_code_chunk_new_callback (MonoProfiler *prof, gpointer address, int size) {
2132 ProfilerCodeChunks *chunks = & (prof->code_chunks);
2134 if (prof->code_chunks.chunks != NULL) {
2135 LOCK_PROFILER ();
2136 profiler_code_chunk_new (chunks, address, size);
2137 UNLOCK_PROFILER ();
2141 static void
2142 profiler_code_chunk_destroy_callback (MonoProfiler *prof, gpointer address) {
2143 ProfilerCodeChunks *chunks = & (prof->code_chunks);
2144 ProfilerCodeChunk *chunk;
2146 if (prof->code_chunks.chunks != NULL) {
2147 LOCK_PROFILER ();
2148 chunk = profiler_code_chunk_find (chunks, address);
2149 if (chunk != NULL) {
2150 profiler_code_chunk_remove (chunks, chunk);
2152 UNLOCK_PROFILER ();
2156 static void
2157 profiler_code_buffer_new_callback (MonoProfiler *prof, gpointer address, int size, MonoProfilerCodeBufferType type, void *data) {
2158 ProfilerCodeChunks *chunks = & (prof->code_chunks);
2159 ProfilerCodeChunk *chunk;
2161 if (prof->code_chunks.chunks != NULL) {
2162 LOCK_PROFILER ();
2163 chunk = profiler_code_chunk_find (chunks, address);
2164 if (chunk != NULL) {
2165 chunk->buffers = profiler_code_buffer_add (chunk->buffers, address, size, type, data);
2167 UNLOCK_PROFILER ();
2171 static void
2172 profiler_add_write_buffer (void) {
2173 if (profiler->current_write_buffer->next == NULL) {
2174 profiler->current_write_buffer->next = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
2175 profiler->current_write_buffer->next->next = NULL;
2177 //printf ("Added next buffer %p, to buffer %p\n", profiler->current_write_buffer->next, profiler->current_write_buffer);
2180 profiler->current_write_buffer = profiler->current_write_buffer->next;
2181 profiler->current_write_position = 0;
2182 profiler->full_write_buffers ++;
2185 static void
2186 profiler_free_write_buffers (void) {
2187 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
2188 while (current_buffer != NULL) {
2189 ProfilerFileWriteBuffer *next_buffer = current_buffer->next;
2191 //printf ("Freeing write buffer %p, next is %p\n", current_buffer, next_buffer);
2193 g_free (current_buffer);
2194 current_buffer = next_buffer;
2198 #define WRITE_BYTE(b) do {\
2199 if (profiler->current_write_position >= PROFILER_FILE_WRITE_BUFFER_SIZE) {\
2200 profiler_add_write_buffer ();\
2202 profiler->current_write_buffer->buffer [profiler->current_write_position] = (b);\
2203 profiler->current_write_position ++;\
2204 } while (0)
2206 #if (DEBUG_FILE_WRITES)
2207 static int bytes_written = 0;
2208 #endif
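/* Flushes the accumulated write buffers to the output file as one block: a 10 byte */
/* little endian header (2 bytes block code, 4 bytes payload size, 4 bytes counter delta */
/* since the previous header) followed by the buffered payload, after which the buffer */
/* chain is rewound for reuse. */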
2210 static void
2211 write_current_block (guint16 code) {
2212 guint32 size = (profiler->full_write_buffers * PROFILER_FILE_WRITE_BUFFER_SIZE) + profiler->current_write_position;
2213 ProfilerFileWriteBuffer *current_buffer = profiler->write_buffers;
2214 guint64 current_counter;
2215 guint32 counter_delta;
2216 guint8 header [10];
2218 MONO_PROFILER_GET_CURRENT_COUNTER (current_counter);
2219 if (profiler->last_header_counter != 0) {
2220 counter_delta = current_counter - profiler->last_header_counter;
2221 } else {
2222 counter_delta = 0;
2224 profiler->last_header_counter = current_counter;
2226 header [0] = code & 0xff;
2227 header [1] = (code >> 8) & 0xff;
2228 header [2] = size & 0xff;
2229 header [3] = (size >> 8) & 0xff;
2230 header [4] = (size >> 16) & 0xff;
2231 header [5] = (size >> 24) & 0xff;
2232 header [6] = counter_delta & 0xff;
2233 header [7] = (counter_delta >> 8) & 0xff;
2234 header [8] = (counter_delta >> 16) & 0xff;
2235 header [9] = (counter_delta >> 24) & 0xff;
2237 #if (DEBUG_FILE_WRITES)
2238 printf ("write_current_block: writing header (code %d) at offset %d\n", code, bytes_written);
2239 bytes_written += 10;
2240 #endif
2241 WRITE_BUFFER (& (header [0]), 10);
2243 while ((current_buffer != NULL) && (profiler->full_write_buffers > 0)) {
2244 #if (DEBUG_FILE_WRITES)
2245 printf ("write_current_block: writing buffer (size %d)\n", PROFILER_FILE_WRITE_BUFFER_SIZE);
2246 bytes_written += PROFILER_FILE_WRITE_BUFFER_SIZE;
2247 #endif
2248 WRITE_BUFFER (& (current_buffer->buffer [0]), PROFILER_FILE_WRITE_BUFFER_SIZE);
2249 profiler->full_write_buffers --;
2250 current_buffer = current_buffer->next;
2252 if (profiler->current_write_position > 0) {
2253 #if (DEBUG_FILE_WRITES)
2254 printf ("write_current_block: writing last buffer (size %d)\n", profiler->current_write_position);
2255 bytes_written += profiler->current_write_position;
2256 #endif
2257 WRITE_BUFFER (& (current_buffer->buffer [0]), profiler->current_write_position);
2259 FLUSH_FILE ();
2260 #if (DEBUG_FILE_WRITES)
2261 printf ("write_current_block: buffers flushed (file size %d)\n", bytes_written);
2262 #endif
2264 profiler->current_write_buffer = profiler->write_buffers;
2265 profiler->current_write_position = 0;
2266 profiler->full_write_buffers = 0;
2270 #define SEVEN_BITS_MASK (0x7f)
2271 #define EIGHT_BIT_MASK (0x80)
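/* Integers are encoded 7 bits per byte, least significant bits first; the byte carrying */
/* the most significant bits has EIGHT_BIT_MASK set to mark the end of the value. */
/* For example 300 (0x12C) is written as 0x2C followed by 0x82, and 0 as the single byte 0x80. */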
2273 static void
2274 write_uint32 (guint32 value) {
2275 while (value > SEVEN_BITS_MASK) {
2276 WRITE_BYTE (value & SEVEN_BITS_MASK);
2277 value >>= 7;
2279 WRITE_BYTE (value | EIGHT_BIT_MASK);
2281 static void
2282 write_uint64 (guint64 value) {
2283 while (value > SEVEN_BITS_MASK) {
2284 WRITE_BYTE (value & SEVEN_BITS_MASK);
2285 value >>= 7;
2287 WRITE_BYTE (value | EIGHT_BIT_MASK);
2289 static void
2290 write_string (const char *string) {
2291 while (*string != 0) {
2292 WRITE_BYTE (*string);
2293 string ++;
2295 WRITE_BYTE (0);
2298 static void write_clock_data (void);
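/* Writes a directives block describing how later events are encoded: when "start" is TRUE */
/* the directives matching the active action flags are emitted, otherwise only the */
/* terminating MONO_PROFILER_DIRECTIVE_END is written. */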
2299 static void
2300 write_directives_block (gboolean start) {
2301 write_clock_data ();
2303 if (start) {
2304 if (profiler->action_flags.save_allocation_caller) {
2305 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_CALLER);
2307 if (profiler->action_flags.save_allocation_stack || profiler->action_flags.track_calls) {
2308 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_HAVE_STACK);
2310 if (profiler->action_flags.allocations_carry_id) {
2311 write_uint32 (MONO_PROFILER_DIRECTIVE_ALLOCATIONS_CARRY_ID);
2313 write_uint32 (MONO_PROFILER_DIRECTIVE_LOADED_ELEMENTS_CARRY_ID);
2314 write_uint32 (MONO_PROFILER_DIRECTIVE_CLASSES_CARRY_ASSEMBLY_ID);
2315 write_uint32 (MONO_PROFILER_DIRECTIVE_METHODS_CARRY_WRAPPER_FLAG);
2317 write_uint32 (MONO_PROFILER_DIRECTIVE_END);
2319 write_clock_data ();
2320 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_DIRECTIVES);
2323 #if DEBUG_HEAP_PROFILER
2324 #define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c) printf ("WRITE_HEAP_SHOT_JOB_VALUE: writing value %p at cursor %p\n", (v), (c))
2325 #else
2326 #define WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE(v,c)
2327 #endif
2328 #define WRITE_HEAP_SHOT_JOB_VALUE(j,v) do {\
2329 if ((j)->cursor < (j)->end) {\
2330 WRITE_HEAP_SHOT_JOB_VALUE_MESSAGE ((v), ((j)->cursor));\
2331 *((j)->cursor) = (v);\
2332 (j)->cursor ++;\
2333 } else {\
2334 profiler_heap_shot_write_job_add_buffer (j, v);\
2336 } while (0)
2339 #undef GUINT_TO_POINTER
2340 #undef GPOINTER_TO_UINT
2341 #if (SIZEOF_VOID_P == 4)
2342 #define GUINT_TO_POINTER(u) ((void*)(guint32)(u))
2343 #define GPOINTER_TO_UINT(p) ((guint32)(void*)(p))
2344 #elif (SIZEOF_VOID_P == 8)
2345 #define GUINT_TO_POINTER(u) ((void*)(guint64)(u))
2346 #define GPOINTER_TO_UINT(p) ((guint64)(void*)(p))
2347 #else
2348 #error Bad size of void pointer
2349 #endif
2351 #define WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE(j,v,c) WRITE_HEAP_SHOT_JOB_VALUE (j, GUINT_TO_POINTER (GPOINTER_TO_UINT (v)|(c)))
2353 #if DEBUG_HEAP_PROFILER
2354 #define UPDATE_JOB_BUFFER_CURSOR_MESSAGE() printf ("profiler_heap_shot_write_block[UPDATE_JOB_BUFFER_CURSOR]: in job %p, moving to buffer %p and cursor %p\n", job, buffer, cursor)
2355 #else
2356 #define UPDATE_JOB_BUFFER_CURSOR_MESSAGE()
2357 #endif
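/* Advances the read cursor over the chain of heap shot write buffers: every buffer is */
/* full except the last one, which is only valid up to job->cursor; the cursor becomes */
/* NULL once all data has been consumed. */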
2358 #define UPDATE_JOB_BUFFER_CURSOR() do {\
2359 cursor++;\
2360 if (cursor >= end) {\
2361 buffer = buffer->next;\
2362 if (buffer != NULL) {\
2363 cursor = & (buffer->buffer [0]);\
2364 if (buffer->next != NULL) {\
2365 end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);\
2366 } else {\
2367 end = job->cursor;\
2369 } else {\
2370 cursor = NULL;\
2373 UPDATE_JOB_BUFFER_CURSOR_MESSAGE ();\
2374 } while (0)
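/* Heap shot job entries are pointers tagged with a HEAP_CODE_* value in their low bits: */
/* a HEAP_CODE_FREE_OBJECT_CLASS entry carries the class of an unreachable object and is */
/* followed by its size, while a HEAP_CODE_OBJECT entry carries the object address and is */
/* followed by its reference count and the references themselves. */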
2376 static void
2377 profiler_heap_shot_write_data_block (ProfilerHeapShotWriteJob *job) {
2378 ProfilerHeapShotWriteBuffer *buffer;
2379 gpointer* cursor;
2380 gpointer* end;
2381 guint64 start_counter;
2382 guint64 start_time;
2383 guint64 end_counter;
2384 guint64 end_time;
2386 write_uint64 (job->start_counter);
2387 write_uint64 (job->start_time);
2388 write_uint64 (job->end_counter);
2389 write_uint64 (job->end_time);
2390 write_uint32 (job->collection);
2391 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2392 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2393 write_uint64 (start_counter);
2394 write_uint64 (start_time);
2395 #if DEBUG_HEAP_PROFILER
2396 printf ("profiler_heap_shot_write_data_block: start writing job %p (start %p, end %p)...\n", job, & (job->buffers->buffer [0]), job->cursor);
2397 #endif
2398 buffer = job->buffers;
2399 cursor = & (buffer->buffer [0]);
2400 if (buffer->next != NULL) {
2401 end = & (buffer->buffer [PROFILER_HEAP_SHOT_WRITE_BUFFER_SIZE]);
2402 } else {
2403 end = job->cursor;
2405 if (cursor >= end) {
2406 cursor = NULL;
2408 #if DEBUG_HEAP_PROFILER
2409 printf ("profiler_heap_shot_write_data_block: in job %p, starting at buffer %p and cursor %p\n", job, buffer, cursor);
2410 #endif
2411 while (cursor != NULL) {
2412 gpointer value = *cursor;
2413 HeapProfilerJobValueCode code = GPOINTER_TO_UINT (value) & HEAP_CODE_MASK;
2414 #if DEBUG_HEAP_PROFILER
2415 printf ("profiler_heap_shot_write_data_block: got value %p and code %d\n", value, code);
2416 #endif
2418 UPDATE_JOB_BUFFER_CURSOR ();
2419 if (code == HEAP_CODE_FREE_OBJECT_CLASS) {
2420 MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2421 //MonoClass *klass = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) % 4);
2422 ClassIdMappingElement *class_id;
2423 guint32 size;
2425 class_id = class_id_mapping_element_get (klass);
2426 if (class_id == NULL) {
2427 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2429 g_assert (class_id != NULL);
2430 write_uint32 ((class_id->id << 2) | HEAP_CODE_FREE_OBJECT_CLASS);
2432 size = GPOINTER_TO_UINT (*cursor);
2433 UPDATE_JOB_BUFFER_CURSOR ();
2434 write_uint32 (size);
2435 #if DEBUG_HEAP_PROFILER
2436 printf ("profiler_heap_shot_write_data_block: wrote unreachable object of class %p (id %d, size %d)\n", klass, class_id->id, size);
2437 #endif
2438 } else if (code == HEAP_CODE_OBJECT) {
2439 MonoObject *object = GUINT_TO_POINTER (GPOINTER_TO_UINT (value) & (~ (guint64) HEAP_CODE_MASK));
2440 MonoClass *klass = mono_object_get_class (object);
2441 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
2442 guint32 size = mono_object_get_size (object);
2443 guint32 references = GPOINTER_TO_UINT (*cursor);
2444 UPDATE_JOB_BUFFER_CURSOR ();
2446 if (class_id == NULL) {
2447 printf ("profiler_heap_shot_write_data_block: unknown class %p", klass);
2449 g_assert (class_id != NULL);
2451 write_uint64 (GPOINTER_TO_UINT (value));
2452 write_uint32 (class_id->id);
2453 write_uint32 (size);
2454 write_uint32 (references);
2455 #if DEBUG_HEAP_PROFILER
2456 printf ("profiler_heap_shot_write_data_block: writing object %p (references %d)\n", value, references);
2457 #endif
2459 while (references > 0) {
2460 gpointer reference = *cursor;
2461 write_uint64 (GPOINTER_TO_UINT (reference));
2462 UPDATE_JOB_BUFFER_CURSOR ();
2463 references --;
2464 #if DEBUG_HEAP_PROFILER
2465 printf ("profiler_heap_shot_write_data_block: inside object %p, wrote reference %p)\n", value, reference);
2466 #endif
2468 } else {
2469 #if DEBUG_HEAP_PROFILER
2470 printf ("profiler_heap_shot_write_data_block: unknown code %d in value %p\n", code, value);
2471 #endif
2472 g_assert_not_reached ();
2475 write_uint32 (0);
2477 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2478 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2479 write_uint64 (end_counter);
2480 write_uint64 (end_time);
2482 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_DATA);
2483 #if DEBUG_HEAP_PROFILER
2484 printf ("profiler_heap_shot_write_data_block: writing job %p done.\n", job);
2485 #endif
2487 static void
2488 profiler_heap_shot_write_summary_block (ProfilerHeapShotWriteJob *job) {
2489 guint64 start_counter;
2490 guint64 start_time;
2491 guint64 end_counter;
2492 guint64 end_time;
2493 int id;
2495 #if DEBUG_HEAP_PROFILER
2496 printf ("profiler_heap_shot_write_summary_block: start writing job %p...\n", job);
2497 #endif
2498 MONO_PROFILER_GET_CURRENT_COUNTER (start_counter);
2499 MONO_PROFILER_GET_CURRENT_TIME (start_time);
2500 write_uint64 (start_counter);
2501 write_uint64 (start_time);
2503 write_uint32 (job->collection);
2505 for (id = 0; id < job->summary.capacity; id ++) {
2506 if ((job->summary.per_class_data [id].reachable.instances > 0) || (job->summary.per_class_data [id].unreachable.instances > 0)) {
2507 write_uint32 (id);
2508 write_uint32 (job->summary.per_class_data [id].reachable.instances);
2509 write_uint32 (job->summary.per_class_data [id].reachable.bytes);
2510 write_uint32 (job->summary.per_class_data [id].unreachable.instances);
2511 write_uint32 (job->summary.per_class_data [id].unreachable.bytes);
2514 write_uint32 (0);
2516 MONO_PROFILER_GET_CURRENT_COUNTER (end_counter);
2517 MONO_PROFILER_GET_CURRENT_TIME (end_time);
2518 write_uint64 (end_counter);
2519 write_uint64 (end_time);
2521 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_HEAP_SUMMARY);
2522 #if DEBUG_HEAP_PROFILER
2523 printf ("profiler_heap_shot_write_summary_block: writing job %p done.\n", job);
2524 #endif
2527 static void
2528 profiler_heap_shot_write_block (ProfilerHeapShotWriteJob *job) {
2529 #if DEBUG_HEAP_PROFILER
2530 printf ("profiler_heap_shot_write_block: working on job %p...\n", job);
2531 #endif
2533 if (profiler->action_flags.collection_summary == TRUE) {
2534 profiler_heap_shot_write_summary_block (job);
2537 if ((profiler->action_flags.unreachable_objects == TRUE) || (profiler->action_flags.heap_shot == TRUE)) {
2538 profiler_heap_shot_write_data_block (job);
2541 profiler_heap_shot_write_job_free_buffers (job);
2542 #if DEBUG_HEAP_PROFILER
2543 printf ("profiler_heap_shot_write_block: work on job %p done.\n", job);
2544 #endif
2547 static void
2548 write_element_load_block (LoadedElement *element, guint8 kind, gsize thread_id, gpointer item) {
2549 WRITE_BYTE (kind);
2550 write_uint64 (element->load_start_counter);
2551 write_uint64 (element->load_end_counter);
2552 write_uint64 (thread_id);
2553 write_uint32 (element->id);
2554 write_string (element->name);
2555 if (kind & MONO_PROFILER_LOADED_EVENT_ASSEMBLY) {
2556 MonoImage *image = mono_assembly_get_image ((MonoAssembly*) item);
2557 MonoAssemblyName aname;
2558 if (mono_assembly_fill_assembly_name (image, &aname)) {
2559 write_string (aname.name);
2560 write_uint32 (aname.major);
2561 write_uint32 (aname.minor);
2562 write_uint32 (aname.build);
2563 write_uint32 (aname.revision);
2564 write_string (aname.culture && *aname.culture? aname.culture: "neutral");
2565 write_string (aname.public_key_token [0] ? (char *)aname.public_key_token : "null");
2566 /* Retargetable flag */
2567 write_uint32 ((aname.flags & 0x00000100) ? 1 : 0);
2568 } else {
2569 write_string ("UNKNOWN");
2570 write_uint32 (0);
2571 write_uint32 (0);
2572 write_uint32 (0);
2573 write_uint32 (0);
2574 write_string ("neutral");
2575 write_string ("null");
2576 write_uint32 (0);
2579 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_LOADED);
2580 element->load_written = TRUE;
2583 static void
2584 write_element_unload_block (LoadedElement *element, guint8 kind, gsize thread_id) {
2585 WRITE_BYTE (kind);
2586 write_uint64 (element->unload_start_counter);
2587 write_uint64 (element->unload_end_counter);
2588 write_uint64 (thread_id);
2589 write_uint32 (element->id);
2590 write_string (element->name);
2591 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_UNLOADED);
2592 element->unload_written = TRUE;
2595 static void
2596 write_clock_data (void) {
2597 guint64 counter;
2598 guint64 time;
2600 MONO_PROFILER_GET_CURRENT_COUNTER (counter);
2601 MONO_PROFILER_GET_CURRENT_TIME (time);
2603 write_uint64 (counter);
2604 write_uint64 (time);
2607 static void
2608 write_mapping_block (gsize thread_id) {
2609 ClassIdMappingElement *current_class;
2610 MethodIdMappingElement *current_method;
2612 if ((profiler->classes->unwritten == NULL) && (profiler->methods->unwritten == NULL))
2613 return;
2615 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2616 printf ("[write_mapping_block][TID %ld] START\n", thread_id);
2617 #endif
2619 write_clock_data ();
2620 write_uint64 (thread_id);
2622 for (current_class = profiler->classes->unwritten; current_class != NULL; current_class = current_class->next_unwritten) {
2623 MonoImage *image = mono_class_get_image (current_class->klass);
2624 MonoAssembly *assembly = mono_image_get_assembly (image);
2625 guint32 assembly_id = loaded_element_get_id (profiler->loaded_assemblies, assembly);
2626 write_uint32 (current_class->id);
2627 write_uint32 (assembly_id);
2628 write_string (current_class->name);
2629 #if (DEBUG_MAPPING_EVENTS)
2630 printf ("mapping CLASS (%d => %s)\n", current_class->id, current_class->name);
2631 #endif
2632 g_free (current_class->name);
2633 current_class->name = NULL;
2635 write_uint32 (0);
2636 profiler->classes->unwritten = NULL;
2638 for (current_method = profiler->methods->unwritten; current_method != NULL; current_method = current_method->next_unwritten) {
2639 MonoMethod *method = current_method->method;
2640 MonoClass *klass = mono_method_get_class (method);
2641 ClassIdMappingElement *class_element = class_id_mapping_element_get (klass);
2642 g_assert (class_element != NULL);
2643 write_uint32 (current_method->id);
2644 write_uint32 (class_element->id);
2645 if (method->wrapper_type != 0) {
2646 write_uint32 (1);
2647 } else {
2648 write_uint32 (0);
2650 write_string (current_method->name);
2651 #if (DEBUG_MAPPING_EVENTS)
2652 printf ("mapping METHOD ([%d]%d => %s)\n", class_element?class_element->id:1, current_method->id, current_method->name);
2653 #endif
2654 g_free (current_method->name);
2655 current_method->name = NULL;
2657 write_uint32 (0);
2658 profiler->methods->unwritten = NULL;
2660 write_clock_data ();
2661 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_MAPPING);
2663 #if (DEBUG_MAPPING_EVENTS || DEBUG_FILE_WRITES)
2664 printf ("[write_mapping_block][TID %ld] END\n", thread_id);
2665 #endif
2668 typedef enum {
2669 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER = 1,
2670 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_IMPLICIT = 2,
2671 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT = 3,
2672 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION = 4,
2673 MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT = 5,
2674 MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT = 6,
2675 MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT = 7
2676 } MonoProfilerPackedEventCode;
2677 #define MONO_PROFILER_PACKED_EVENT_CODE_BITS 3
2678 #define MONO_PROFILER_PACKED_EVENT_DATA_BITS (8-MONO_PROFILER_PACKED_EVENT_CODE_BITS)
2679 #define MONO_PROFILER_PACKED_EVENT_DATA_MASK ((1<<MONO_PROFILER_PACKED_EVENT_DATA_BITS)-1)
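/* Every event starts with one byte whose low 3 bits are a MONO_PROFILER_PACKED_EVENT_CODE_* */
/* value. For the "packed" method enter/exit and allocation codes the remaining 5 bits hold */
/* the low bits of the event data and the rest follows as a variable length integer (e.g. */
/* method id 9 with METHOD_ENTER gives the byte 0x49); for "full" codes those bits hold the */
/* event kind and code instead. */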
2681 #define MONO_PROFILER_EVENT_MAKE_PACKED_CODE(result,data,base) do {\
2682 result = ((base)|((data & MONO_PROFILER_PACKED_EVENT_DATA_MASK) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
2683 data >>= MONO_PROFILER_PACKED_EVENT_DATA_BITS;\
2684 } while (0)
2685 #define MONO_PROFILER_EVENT_MAKE_FULL_CODE(result,code,kind,base) do {\
2686 result = ((base)|((((kind)<<4) | (code)) << MONO_PROFILER_PACKED_EVENT_CODE_BITS));\
2687 } while (0)
2689 static void
2690 rewrite_last_written_stack (ProfilerThreadStack *stack) {
2691 guint8 event_code;
2692 int i = thread_stack_get_last_written_frame (stack);
2694 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_STACK_SECTION, 0, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2695 WRITE_BYTE (event_code);
2696 write_uint32 (0);
2697 write_uint32 (i);
2699 while (i > 0) {
2700 i--;
2701 write_uint32 (thread_stack_written_frame_at_index (stack, i));
2706 static ProfilerEventData*
2707 write_stack_section_event (ProfilerEventData *events, ProfilerPerThreadData *data) {
2708 int last_saved_frame = events->data.number;
2709 int saved_frames = events->value;
2710 guint8 event_code;
2711 int i;
2713 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_STACK_SECTION, 0, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2714 WRITE_BYTE (event_code);
2715 write_uint32 (last_saved_frame);
2716 write_uint32 (saved_frames);
2717 thread_stack_set_last_written_frame (&(data->stack), last_saved_frame + saved_frames);
2718 events++;
2720 for (i = 0; i < saved_frames; i++) {
2721 guint8 code = events->code;
2722 guint32 jit_flag;
2723 MethodIdMappingElement *method;
2724 guint32 frame_value;
2726 if (code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER) {
2727 jit_flag = 0;
2728 } else if (code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER) {
2729 jit_flag = 1;
2730 } else {
2731 g_assert_not_reached ();
2732 jit_flag = 0;
2735 method = method_id_mapping_element_get (events->data.address);
2736 g_assert (method != NULL);
2737 frame_value = (method->id << 1) | jit_flag;
2738 write_uint32 (frame_value);
2739 thread_stack_write_frame_at_index (&(data->stack), last_saved_frame + saved_frames - (1 + i), frame_value);
2740 events ++;
2743 return events;
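/* Serializes one buffered event. Method and class pointers are translated to the numeric */
/* ids emitted by write_mapping_block; an event whose value field overflowed (MAX_EVENT_VALUE) */
/* keeps the real value in the slot that follows; allocation events may be followed by extra */
/* records for the caller method and the object id, and monitor events carry the paired */
/* MONO_PROFILER_EVENT_OBJECT_MONITOR value and object address. */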
2746 static ProfilerEventData*
2747 write_event (ProfilerEventData *event, ProfilerPerThreadData *data) {
2748 ProfilerEventData *next = event + 1;
2749 gboolean write_event_value = TRUE;
2750 guint8 event_code;
2751 guint64 event_data;
2752 guint64 event_value;
2753 gboolean write_event_value_extension_1 = FALSE;
2754 guint64 event_value_extension_1 = 0;
2755 gboolean write_event_value_extension_2 = FALSE;
2756 guint64 event_value_extension_2 = 0;
2758 event_value = event->value;
2759 if (event_value == MAX_EVENT_VALUE) {
2760 event_value = *((guint64*)next);
2761 next ++;
2764 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
2765 MethodIdMappingElement *element = method_id_mapping_element_get (event->data.address);
2766 g_assert (element != NULL);
2767 event_data = element->id;
2769 if (event->code == MONO_PROFILER_EVENT_METHOD_CALL) {
2770 if (event->kind == MONO_PROFILER_EVENT_KIND_START) {
2771 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_ENTER);
2772 } else {
2773 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EXIT_EXPLICIT);
2775 } else {
2776 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_METHOD_EVENT);
2778 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
2779 ClassIdMappingElement *element = class_id_mapping_element_get (event->data.address);
2780 g_assert (element != NULL);
2781 event_data = element->id;
2783 if (event->code == MONO_PROFILER_EVENT_CLASS_ALLOCATION) {
2784 if ((! profiler->action_flags.save_allocation_caller) || (! (next->code == MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER))) {
2785 MONO_PROFILER_EVENT_MAKE_PACKED_CODE (event_code, event_data, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_ALLOCATION);
2786 } else {
2787 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2790 if (profiler->action_flags.save_allocation_caller) {
2791 MonoMethod *caller_method = next->data.address;
2793 if ((next->code != MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER) && (next->code != MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER)) {
2794 g_assert_not_reached ();
2797 if (caller_method != NULL) {
2798 MethodIdMappingElement *caller = method_id_mapping_element_get (caller_method);
2799 g_assert (caller != NULL);
2800 event_value_extension_1 = caller->id;
2803 write_event_value_extension_1 = TRUE;
2804 next ++;
2807 if (profiler->action_flags.allocations_carry_id) {
2808 event_value_extension_2 = GPOINTER_TO_UINT (next->data.address);
2810 if (next->code != MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID) {
2811 g_assert_not_reached ();
2814 write_event_value_extension_2 = TRUE;
2815 next ++;
2817 } else if (event->code == MONO_PROFILER_EVENT_CLASS_MONITOR) {
2818 g_assert (next->code == MONO_PROFILER_EVENT_OBJECT_MONITOR);
2820 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT);
2821 event_value_extension_1 = next->value;
2822 write_event_value_extension_1 = TRUE;
2823 event_value_extension_2 = GPOINTER_TO_UINT (next->data.address);
2824 write_event_value_extension_2 = TRUE;
2825 next ++;
2826 } else {
2827 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_CLASS_EVENT);
2829 } else {
2830 if (event->code == MONO_PROFILER_EVENT_STACK_SECTION) {
2831 return write_stack_section_event (event, data);
2832 } else {
2833 event_data = event->data.number;
2834 MONO_PROFILER_EVENT_MAKE_FULL_CODE (event_code, event->code, event->kind, MONO_PROFILER_PACKED_EVENT_CODE_OTHER_EVENT);
2838 /* Skip writing JIT events if the user did not ask for them */
2839 if ((event->code == MONO_PROFILER_EVENT_METHOD_JIT) && ! profiler->action_flags.jit_time) {
2840 return next;
2843 #if (DEBUG_LOGGING_PROFILER)
2844 EVENT_MARK ();
2845 printf ("writing EVENT[%p] data_type:%d, kind:%d, code:%d (%d:%ld:%ld)\n", event,
2846 event->data_type, event->kind, event->code,
2847 event_code, event_data, event_value);
2848 #endif
2850 WRITE_BYTE (event_code);
2851 write_uint64 (event_data);
2852 if (write_event_value) {
2853 write_uint64 (event_value);
2854 if (write_event_value_extension_1) {
2855 write_uint64 (event_value_extension_1);
2857 if (write_event_value_extension_2) {
2858 write_uint64 (event_value_extension_2);
2862 return next;
2865 static void
2866 write_thread_data_block (ProfilerPerThreadData *data) {
2867 ProfilerEventData *start = data->first_unwritten_event;
2868 ProfilerEventData *end = data->first_unmapped_event;
2870 if (start == end)
2871 return;
2872 #if (DEBUG_FILE_WRITES)
2873 printf ("write_thread_data_block: preparing buffer for thread %ld\n", (guint64) data->thread_id);
2874 #endif
2875 write_clock_data ();
2876 write_uint64 (data->thread_id);
2878 write_uint64 (data->start_event_counter);
2880 /* If we are tracking the stack, make sure that stack sections */
2881 /* can be fully reconstructed even reading only one block */
2882 if (profiler->action_flags.track_stack) {
2883 rewrite_last_written_stack (&(data->stack));
2886 while (start < end) {
2887 start = write_event (start, data);
2889 WRITE_BYTE (0);
2890 data->first_unwritten_event = end;
2892 write_clock_data ();
2893 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_EVENTS);
2894 #if (DEBUG_FILE_WRITES)
2895 printf ("write_thread_data_block: buffer for thread %ld written\n", (guint64) data->thread_id);
2896 #endif
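/* The code below tracks the executable memory regions mapped into the process and the ELF */
/* files they come from, presumably so that statistical hits falling outside managed code */
/* can be resolved to unmanaged symbols: each region records its address range, file name */
/* and file offset, and later receives a sorted table of the ELF function symbols it contains. */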
2899 static ProfilerExecutableMemoryRegionData*
2900 profiler_executable_memory_region_new (gpointer *start, gpointer *end, guint32 file_offset, char *file_name, guint32 id) {
2901 ProfilerExecutableMemoryRegionData *result = g_new (ProfilerExecutableMemoryRegionData, 1);
2902 result->start = start;
2903 result->end = end;
2904 result->file_offset = file_offset;
2905 result->file_name = g_strdup (file_name);
2906 result->id = id;
2907 result->is_new = TRUE;
2909 result->file = NULL;
2910 result->file_region_reference = NULL;
2911 result->symbols_capacity = 0;
2912 result->symbols_count = 0;
2913 result->symbols = NULL;
2915 return result;
2918 static void
2919 executable_file_close (ProfilerExecutableMemoryRegionData *region);
2921 static void
2922 profiler_executable_memory_region_destroy (ProfilerExecutableMemoryRegionData *data) {
2923 if (data->file != NULL) {
2924 executable_file_close (data);
2925 data->file = NULL;
2927 if (data->symbols != NULL) {
2928 g_free (data->symbols);
2929 data->symbols = NULL;
2931 if (data->file_name != NULL) {
2932 g_free (data->file_name);
2933 data->file_name = NULL;
2935 g_free (data);
2938 static ProfilerExecutableMemoryRegions*
2939 profiler_executable_memory_regions_new (int next_id, int next_unmanaged_function_id) {
2940 ProfilerExecutableMemoryRegions *result = g_new (ProfilerExecutableMemoryRegions, 1);
2941 result->regions = g_new0 (ProfilerExecutableMemoryRegionData*, 32);
2942 result->regions_capacity = 32;
2943 result->regions_count = 0;
2944 result->next_id = next_id;
2945 result->next_unmanaged_function_id = next_unmanaged_function_id;
2946 return result;
2949 static void
2950 profiler_executable_memory_regions_destroy (ProfilerExecutableMemoryRegions *regions) {
2951 int i;
2953 for (i = 0; i < regions->regions_count; i++) {
2954 profiler_executable_memory_region_destroy (regions->regions [i]);
2956 g_free (regions->regions);
2957 g_free (regions);
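/* Binary search over the region array (kept sorted by start address by sort_regions) for */
/* the region containing "address"; returns NULL when the address falls outside every region. */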
2960 static ProfilerExecutableMemoryRegionData*
2961 find_address_region (ProfilerExecutableMemoryRegions *regions, gpointer address) {
2962 int low_index = 0;
2963 int high_index = regions->regions_count;
2964 int middle_index = 0;
2965 ProfilerExecutableMemoryRegionData *middle_region = regions->regions [0];
2967 if ((regions->regions_count == 0) || (regions->regions [low_index]->start > address) || (regions->regions [high_index - 1]->end < address)) {
2968 return NULL;
2971 //printf ("find_address_region: Looking for address %p in %d regions (from %p to %p)\n", address, regions->regions_count, regions->regions [low_index]->start, regions->regions [high_index - 1]->end);
2973 while (low_index != high_index) {
2974 middle_index = low_index + ((high_index - low_index) / 2);
2975 middle_region = regions->regions [middle_index];
2977 //printf ("find_address_region: Looking for address %p, considering index %d[%p-%p] (%d-%d)\n", address, middle_index, middle_region->start, middle_region->end, low_index, high_index);
2979 if (middle_region->start > address) {
2980 if (middle_index > 0) {
2981 high_index = middle_index;
2982 } else {
2983 return NULL;
2985 } else if (middle_region->end < address) {
2986 if (middle_index < regions->regions_count - 1) {
2987 low_index = middle_index + 1;
2988 } else {
2989 return NULL;
2991 } else {
2992 return middle_region;
2996 if ((middle_region == NULL) || (middle_region->start > address) || (middle_region->end < address)) {
2997 return NULL;
2998 } else {
2999 return middle_region;
3003 static void
3004 append_region (ProfilerExecutableMemoryRegions *regions, gpointer *start, gpointer *end, guint32 file_offset, char *file_name) {
3005 if (regions->regions_count >= regions->regions_capacity) {
3006 ProfilerExecutableMemoryRegionData **new_regions = g_new0 (ProfilerExecutableMemoryRegionData*, regions->regions_capacity * 2);
3007 memcpy (new_regions, regions->regions, regions->regions_capacity * sizeof (ProfilerExecutableMemoryRegionData*));
3008 g_free (regions->regions);
3009 regions->regions = new_regions;
3010 regions->regions_capacity = regions->regions_capacity * 2;
3012 regions->regions [regions->regions_count] = profiler_executable_memory_region_new (start, end, file_offset, file_name, regions->next_id);
3013 regions->regions_count ++;
3014 regions->next_id ++;
3017 static gboolean
3018 regions_are_equivalent (ProfilerExecutableMemoryRegionData *region1, ProfilerExecutableMemoryRegionData *region2) {
3019 if ((region1->start == region2->start) &&
3020 (region1->end == region2->end) &&
3021 (region1->file_offset == region2->file_offset) &&
3022 ! strcmp (region1->file_name, region2->file_name)) {
3023 return TRUE;
3024 } else {
3025 return FALSE;
3029 static int
3030 compare_regions (const void *a1, const void *a2) {
3031 ProfilerExecutableMemoryRegionData *r1 = * (ProfilerExecutableMemoryRegionData**) a1;
3032 ProfilerExecutableMemoryRegionData *r2 = * (ProfilerExecutableMemoryRegionData**) a2;
3033 return (r1->start < r2->start)? -1 : ((r1->start > r2->start)? 1 : 0);
3036 static void
3037 restore_old_regions (ProfilerExecutableMemoryRegions *old_regions, ProfilerExecutableMemoryRegions *new_regions) {
3038 int old_i;
3039 int new_i;
3041 for (new_i = 0; new_i < new_regions->regions_count; new_i++) {
3042 ProfilerExecutableMemoryRegionData *new_region = new_regions->regions [new_i];
3043 for (old_i = 0; old_i < old_regions->regions_count; old_i++) {
3044 ProfilerExecutableMemoryRegionData *old_region = old_regions->regions [old_i];
3045 if ( regions_are_equivalent (old_region, new_region)) {
3046 new_regions->regions [new_i] = old_region;
3047 old_regions->regions [old_i] = new_region;
3049 // FIXME (sanity check)
3050 g_assert (new_region->is_new && ! old_region->is_new);
3056 static void
3057 sort_regions (ProfilerExecutableMemoryRegions *regions) {
3058 if (regions->regions_count > 1) {
3059 int i;
3061 qsort (regions->regions, regions->regions_count, sizeof (ProfilerExecutableMemoryRegionData *), compare_regions);
3063 i = 1;
3064 while (i < regions->regions_count) {
3065 ProfilerExecutableMemoryRegionData *current_region = regions->regions [i];
3066 ProfilerExecutableMemoryRegionData *previous_region = regions->regions [i - 1];
3068 if (regions_are_equivalent (previous_region, current_region)) {
3069 int j;
3071 if (! current_region->is_new) {
3072 profiler_executable_memory_region_destroy (previous_region);
3073 regions->regions [i - 1] = current_region;
3074 } else {
3075 profiler_executable_memory_region_destroy (current_region);
3078 for (j = i + 1; j < regions->regions_count; j++) {
3079 regions->regions [j - 1] = regions->regions [j];
3082 regions->regions_count --;
3083 } else {
3084 i++;
3090 static void
3091 fix_region_references (ProfilerExecutableMemoryRegions *regions) {
3092 int i;
3093 for (i = 0; i < regions->regions_count; i++) {
3094 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3095 if (region->file_region_reference != NULL) {
3096 region->file_region_reference->region = region;
3101 static void
3102 executable_file_add_region_reference (ProfilerExecutableFile *file, ProfilerExecutableMemoryRegionData *region) {
3103 guint8 *section_headers = file->data + file->header->e_shoff;
3104 int section_index;
3106 for (section_index = 1; section_index < file->header->e_shnum; section_index ++) {
3107 ElfSection *section_header = (ElfSection*) (section_headers + (file->header->e_shentsize * section_index));
3109 if ((section_header->sh_addr != 0) && (section_header->sh_flags & ELF_SHF_EXECINSTR) &&
3110 (region->file_offset <= section_header->sh_offset) && (region->file_offset + (((guint8*)region->end)-((guint8*)region->start)) >= (section_header->sh_offset + section_header->sh_size))) {
3111 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [section_index]);
3112 section_region->region = region;
3113 section_region->section_address = (gpointer) section_header->sh_addr;
3114 section_region->section_offset = section_header->sh_offset;
3115 region->file_region_reference = section_region;
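/* Validates the ELF magic, checks that the file's ELF class matches the pointer size this */
/* profiler was built with, and uses the 0x0102 test word to verify that the file's byte */
/* order matches the host (a first byte of 0x01 means the host is big endian, 0x02 little */
/* endian). */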
3120 static gboolean check_elf_header (ElfHeader* header) {
3121 guint16 test = 0x0102;
3123 if ((header->e_ident [EI_MAG0] != 0x7f) || (header->e_ident [EI_MAG1] != 'E') ||
3124 (header->e_ident [EI_MAG2] != 'L') || (header->e_ident [EI_MAG3] != 'F')) {
3125 return FALSE;
3128 if (sizeof (gsize) == 4) {
3129 if (header->e_ident [EI_CLASS] != ELF_CLASS_32) {
3130 g_warning ("Class is not ELF_CLASS_32 with gsize size %d", (int) sizeof (gsize));
3131 return FALSE;
3133 } else if (sizeof (gsize) == 8) {
3134 if (header->e_ident [EI_CLASS] != ELF_CLASS_64) {
3135 g_warning ("Class is not ELF_CLASS_64 with gsize size %d", (int) sizeof (gsize));
3136 return FALSE;
3138 } else {
3139 g_warning ("Absurd gsize size %d", (int) sizeof (gsize));
3140 return FALSE;
3143 if ((*(guint8*)(&test)) == 0x01) {
3144 if (header->e_ident [EI_DATA] != ELF_DATA_MSB) {
3145 g_warning ("Data is not ELF_DATA_MSB with first test byte 0x01");
3146 return FALSE;
3148 } else if ((*(guint8*)(&test)) == 0x02) {
3149 if (header->e_ident [EI_DATA] != ELF_DATA_LSB) {
3150 g_warning ("Data is not ELF_DATA_LSB with first test byte 0x02");
3151 return FALSE;
3153 } else {
3154 g_warning ("Absurd test byte value");
3155 return FALSE;
3158 return TRUE;
3161 static gboolean check_elf_file (int fd) {
3162 void *header = malloc (sizeof (ElfHeader));
3163 ssize_t read_result = read (fd, header, sizeof (ElfHeader));
3164 gboolean result;
3166 if (read_result != sizeof (ElfHeader)) {
3167 result = FALSE;
3168 } else {
3169 result = check_elf_header ((ElfHeader*) header);
3172 free (header);
3173 return result;
3176 static ProfilerExecutableFile*
3177 executable_file_open (ProfilerExecutableMemoryRegionData *region) {
3178 ProfilerExecutableFiles *files = & (profiler->executable_files);
3179 ProfilerExecutableFile *file = region->file;
3181 if (file == NULL) {
3182 file = (ProfilerExecutableFile*) g_hash_table_lookup (files->table, region->file_name);
3184 if (file == NULL) {
3185 struct stat stat_buffer;
3186 int symtab_index = 0;
3187 int strtab_index = 0;
3188 int dynsym_index = 0;
3189 int dynstr_index = 0;
3190 ElfHeader *header;
3191 guint8 *section_headers;
3192 int section_index;
3193 int strings_index;
3195 file = g_new0 (ProfilerExecutableFile, 1);
3196 region->file = file;
3197 g_hash_table_insert (files->table, region->file_name, file);
3198 file->reference_count ++;
3199 file->next_new_file = files->new_files;
3200 files->new_files = file;
3202 file->fd = open (region->file_name, O_RDONLY);
3203 if (file->fd == -1) {
3204 //g_warning ("Cannot open file '%s': '%s'", region->file_name, strerror (errno));
3205 return file;
3206 } else {
3207 if (fstat (file->fd, &stat_buffer) != 0) {
3208 //g_warning ("Cannot stat file '%s': '%s'", region->file_name, strerror (errno));
3209 return file;
3210 } else if (! check_elf_file (file->fd)) {
3211 return file;
3212 } else {
3213 size_t region_length = ((guint8*)region->end) - ((guint8*)region->start);
3214 file->length = stat_buffer.st_size;
3216 if (file->length == region_length) {
3217 file->data = region->start;
3218 close (file->fd);
3219 file->fd = -1;
3220 } else {
3221 file->data = mmap (NULL, file->length, PROT_READ, MAP_PRIVATE, file->fd, 0);
3223 if (file->data == MAP_FAILED) {
3224 close (file->fd);
3225 //g_warning ("Cannot map file '%s': '%s'", region->file_name, strerror (errno));
3226 file->data = NULL;
3227 return file;
3233 /* OK, this is a usable elf file, and we mmapped it... */
3234 header = (ElfHeader*) file->data;
3235 file->header = header;
3236 section_headers = file->data + file->header->e_shoff;
3237 file->main_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * header->e_shstrndx)))->sh_offset);
3239 for (section_index = 0; section_index < header->e_shnum; section_index ++) {
3240 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
3242 if (section_header->sh_type == ELF_SHT_SYMTAB) {
3243 symtab_index = section_index;
3244 } else if (section_header->sh_type == ELF_SHT_DYNSYM) {
3245 dynsym_index = section_index;
3246 } else if (section_header->sh_type == ELF_SHT_STRTAB) {
3247 if (! strcmp (file->main_string_table + section_header->sh_name, ".strtab")) {
3248 strtab_index = section_index;
3249 } else if (! strcmp (file->main_string_table + section_header->sh_name, ".dynstr")) {
3250 dynstr_index = section_index;
3255 if ((symtab_index != 0) && (strtab_index != 0)) {
3256 section_index = symtab_index;
3257 strings_index = strtab_index;
3258 } else if ((dynsym_index != 0) && (dynstr_index != 0)) {
3259 section_index = dynsym_index;
3260 strings_index = dynstr_index;
3261 } else {
3262 section_index = 0;
3263 strings_index = 0;
3266 if (section_index != 0) {
3267 ElfSection *section_header = (ElfSection*) (section_headers + (header->e_shentsize * section_index));
3268 file->symbol_size = section_header->sh_entsize;
3269 file->symbols_count = (guint32) (section_header->sh_size / section_header->sh_entsize);
3270 file->symbols_start = file->data + section_header->sh_offset;
3271 file->symbols_string_table = ((const char*) file->data) + (((ElfSection*) (section_headers + (header->e_shentsize * strings_index)))->sh_offset);
3274 file->section_regions = g_new0 (ProfilerExecutableFileSectionRegion, file->header->e_shnum);
3275 } else {
3276 region->file = file;
3277 file->reference_count ++;
3281 if (file->header != NULL) {
3282 executable_file_add_region_reference (file, region);
3285 return file;
3288 static void
3289 executable_file_free (ProfilerExecutableFile* file) {
3290 if (file->fd != -1) {
3291 if (close (file->fd) != 0) {
3292 g_warning ("Cannot close file: '%s'", strerror (errno));
3294 if (file->data != NULL) {
3295 if (munmap (file->data, file->length) != 0) {
3296 g_warning ("Cannot unmap file: '%s'", strerror (errno));
3300 if (file->section_regions != NULL) {
3301 g_free (file->section_regions);
3302 file->section_regions = NULL;
3304 g_free (file);
3307 static void
3308 executable_file_close (ProfilerExecutableMemoryRegionData *region) {
3309 region->file->reference_count --;
3311 if ((region->file_region_reference != NULL) && (region->file_region_reference->region == region)) {
3312 region->file_region_reference->region = NULL;
3313 region->file_region_reference->section_address = 0;
3314 region->file_region_reference->section_offset = 0;
3317 if (region->file->reference_count <= 0) {
3318 ProfilerExecutableFiles *files = & (profiler->executable_files);
3319 g_hash_table_remove (files->table, region->file_name);
3320 executable_file_free (region->file);
3321 region->file = NULL;
3325 static void
3326 executable_file_count_symbols (ProfilerExecutableFile *file) {
3327 int symbol_index;
3329 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
3330 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
3332 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
3333 (symbol->st_shndx > 0) &&
3334 (symbol->st_shndx < file->header->e_shnum)) {
3335 int symbol_section_index = symbol->st_shndx;
3336 ProfilerExecutableMemoryRegionData *region = file->section_regions [symbol_section_index].region;
3337 if ((region != NULL) && (region->symbols == NULL)) {
3338 region->symbols_count ++;
3344 static void
3345 executable_memory_regions_prepare_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
3346 int i;
3347 for (i = 0; i < regions->regions_count; i++) {
3348 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3349 if ((region->symbols_count > 0) && (region->symbols == NULL)) {
3350 region->symbols = g_new (ProfilerUnmanagedSymbol, region->symbols_count);
3351 region->symbols_capacity = region->symbols_count;
3352 region->symbols_count = 0;
3357 static const char*
3358 executable_region_symbol_get_name (ProfilerExecutableMemoryRegionData *region, ProfilerUnmanagedSymbol *symbol) {
3359 ElfSymbol *elf_symbol = (ElfSymbol*) (region->file->symbols_start + (symbol->index * region->file->symbol_size));
3360 return region->file->symbols_string_table + elf_symbol->st_name;
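/* Second symbol pass: for every function symbol store its table index, size
 * and offset within the owning memory region into the array that
 * executable_memory_regions_prepare_symbol_tables allocated for that region. */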
3363 static void
3364 executable_file_build_symbol_tables (ProfilerExecutableFile *file) {
3365 int symbol_index;
3367 for (symbol_index = 0; symbol_index < file->symbols_count; symbol_index ++) {
3368 ElfSymbol *symbol = (ElfSymbol*) (file->symbols_start + (symbol_index * file->symbol_size));
3370 if ((ELF_ST_TYPE (symbol->st_info) == ELF_STT_FUNC) &&
3371 (symbol->st_shndx > 0) &&
3372 (symbol->st_shndx < file->header->e_shnum)) {
3373 int symbol_section_index = symbol->st_shndx;
3374 ProfilerExecutableFileSectionRegion *section_region = & (file->section_regions [symbol_section_index]);
3375 ProfilerExecutableMemoryRegionData *region = section_region->region;
3377 if (region != NULL) {
3378 ProfilerUnmanagedSymbol *new_symbol = & (region->symbols [region->symbols_count]);
3379 region->symbols_count ++;
3381 new_symbol->id = 0;
3382 new_symbol->index = symbol_index;
3383 new_symbol->size = symbol->st_size;
3384 new_symbol->offset = (((guint8*) symbol->st_value) - section_region->section_address) - (region->file_offset - section_region->section_offset);
3390 static int
3391 compare_region_symbols (const void *p1, const void *p2) {
3392 const ProfilerUnmanagedSymbol *s1 = p1;
3393 const ProfilerUnmanagedSymbol *s2 = p2;
3394 return (s1->offset < s2->offset)? -1 : ((s1->offset > s2->offset)? 1 : 0);
3397 static void
3398 executable_memory_regions_sort_symbol_tables (ProfilerExecutableMemoryRegions *regions) {
3399 int i;
3400 for (i = 0; i < regions->regions_count; i++) {
3401 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3402 if ((region->is_new) && (region->symbols != NULL)) {
3403 qsort (region->symbols, region->symbols_count, sizeof (ProfilerUnmanagedSymbol), compare_region_symbols);
3408 static void
3409 build_symbol_tables (ProfilerExecutableMemoryRegions *regions, ProfilerExecutableFiles *files) {
3410 int i;
3411 ProfilerExecutableFile *file;
3413 for (i = 0; i < regions->regions_count; i++) {
3414 ProfilerExecutableMemoryRegionData *region = regions->regions [i];
3415 if ((region->is_new) && (region->file == NULL)) {
3416 executable_file_open (region);
3420 for (file = files->new_files; file != NULL; file = file->next_new_file) {
3421 executable_file_count_symbols (file);
3424 executable_memory_regions_prepare_symbol_tables (regions);
3426 for (file = files->new_files; file != NULL; file = file->next_new_file) {
3427 executable_file_build_symbol_tables (file);
3430 executable_memory_regions_sort_symbol_tables (regions);
3432 file = files->new_files;
3433 while (file != NULL) {
3434 ProfilerExecutableFile *next_file = file->next_new_file;
3435 file->next_new_file = NULL;
3436 file = next_file;
3438 files->new_files = NULL;
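/* Binary search in a region's symbol array (sorted by offset) for the symbol
 * whose [offset, offset + size) range contains the given offset; returns
 * NULL when the offset does not fall inside any symbol. */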
3441 static ProfilerUnmanagedSymbol*
3442 executable_memory_region_find_symbol (ProfilerExecutableMemoryRegionData *region, guint32 offset) {
3443 if (region->symbols_count > 0) {
3444 ProfilerUnmanagedSymbol *low = region->symbols;
3445 ProfilerUnmanagedSymbol *high = region->symbols + (region->symbols_count - 1);
3446 int step = region->symbols_count >> 1;
3447 ProfilerUnmanagedSymbol *current = region->symbols + step;
3449 do {
3450 step = (high - low) >> 1;
3452 if (offset < current->offset) {
3453 high = current;
3454 current = high - step;
3455 } else if (offset >= current->offset) {
3456 if (offset >= (current->offset + current->size)) {
3457 low = current;
3458 current = low + step;
3459 } else {
3460 return current;
3463 } while (step > 0);
3465 if ((offset >= current->offset) && (offset < (current->offset + current->size))) {
3466 return current;
3467 } else {
3468 return NULL;
3470 } else {
3471 return NULL;
3475 //FIXME: also provide Win32 and BSD variants of this maps scanning code
3476 #define MAPS_BUFFER_SIZE 4096
3477 #define MAPS_FILENAME_SIZE 2048
3479 static gboolean
3480 update_regions_buffer (int fd, char *buffer) {
3481 ssize_t result = read (fd, buffer, MAPS_BUFFER_SIZE);
3483 if (result == MAPS_BUFFER_SIZE) {
3484 return TRUE;
3485 } else if (result >= 0) {
3486 *(buffer + result) = 0;
3487 return FALSE;
3488 } else {
3489 *buffer = 0;
3490 return FALSE;
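/* Advance the parse cursor by one character; when it runs past the end of
 * the buffer, or reaches the NUL terminator written by update_regions_buffer,
 * refill the buffer from the file descriptor and restart at its beginning. */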
3494 #define GOTO_NEXT_CHAR(c,b,fd) do {\
3495 (c)++;\
3496 if (((c) - (b) >= MAPS_BUFFER_SIZE) || ((*(c) == 0) && ((c) != (b)))) {\
3497 update_regions_buffer ((fd), (b));\
3498 (c) = (b);\
3500 } while (0);
3502 static int hex_digit_value (char c) {
3503 if ((c >= '0') && (c <= '9')) {
3504 return c - '0';
3505 } else if ((c >= 'a') && (c <= 'f')) {
3506 return c - 'a' + 10;
3507 } else if ((c >= 'A') && (c <= 'F')) {
3508 return c - 'A' + 10;
3509 } else {
3510 return 0;
/* Each line of /proc/self/maps is laid out as follows:
3515 * Start address
 * '-'
3517 * End address
3518 * (space)
3519 * Permissions
3520 * Offset
3521 * (space)
3522 * Device
3523 * (space)
3524 * Inode
3525 * (space)
3526 * File
3527 * \n
 */
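/* For example (a typical line; field widths vary between kernels):
 *   08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm
 * Only executable mappings whose filename starts with '/' or '[' end up in
 * the region list built below. */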
3529 typedef enum {
3530 MAP_LINE_PARSER_STATE_INVALID,
3531 MAP_LINE_PARSER_STATE_START_ADDRESS,
3532 MAP_LINE_PARSER_STATE_END_ADDRESS,
3533 MAP_LINE_PARSER_STATE_PERMISSIONS,
3534 MAP_LINE_PARSER_STATE_OFFSET,
3535 MAP_LINE_PARSER_STATE_DEVICE,
3536 MAP_LINE_PARSER_STATE_INODE,
3537 MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME,
3538 MAP_LINE_PARSER_STATE_FILENAME,
3539 MAP_LINE_PARSER_STATE_DONE
3540 } MapLineParserState;
3542 const char *map_line_parser_state [] = {
3543 "INVALID",
3544 "START_ADDRESS",
3545 "END_ADDRESS",
3546 "PERMISSIONS",
3547 "OFFSET",
3548 "DEVICE",
3549 "INODE",
3550 "BLANK_BEFORE_FILENAME",
3551 "FILENAME",
3552 "DONE"
3555 static char*
3556 parse_map_line (ProfilerExecutableMemoryRegions *regions, int fd, char *buffer, char *filename, char *current) {
3557 MapLineParserState state = MAP_LINE_PARSER_STATE_START_ADDRESS;
3558 gsize start_address = 0;
3559 gsize end_address = 0;
3560 guint32 offset = 0;
3561 int filename_index = 0;
3562 gboolean is_executable = FALSE;
3563 gboolean done = FALSE;
3565 char c = *current;
3567 while (1) {
3568 switch (state) {
3569 case MAP_LINE_PARSER_STATE_START_ADDRESS:
3570 if (isxdigit (c)) {
3571 start_address <<= 4;
3572 start_address |= hex_digit_value (c);
3573 } else if (c == '-') {
3574 state = MAP_LINE_PARSER_STATE_END_ADDRESS;
3575 } else {
3576 state = MAP_LINE_PARSER_STATE_INVALID;
3578 break;
3579 case MAP_LINE_PARSER_STATE_END_ADDRESS:
3580 if (isxdigit (c)) {
3581 end_address <<= 4;
3582 end_address |= hex_digit_value (c);
3583 } else if (isblank (c)) {
3584 state = MAP_LINE_PARSER_STATE_PERMISSIONS;
3585 } else {
3586 state = MAP_LINE_PARSER_STATE_INVALID;
3588 break;
3589 case MAP_LINE_PARSER_STATE_PERMISSIONS:
3590 if (c == 'x') {
3591 is_executable = TRUE;
3592 } else if (isblank (c)) {
3593 state = MAP_LINE_PARSER_STATE_OFFSET;
3594 } else if ((c != '-') && ! isalpha (c)) {
3595 state = MAP_LINE_PARSER_STATE_INVALID;
3597 break;
3598 case MAP_LINE_PARSER_STATE_OFFSET:
3599 if (isxdigit (c)) {
3600 offset <<= 4;
3601 offset |= hex_digit_value (c);
3602 } else if (isblank (c)) {
3603 state = MAP_LINE_PARSER_STATE_DEVICE;
3604 } else {
3605 state = MAP_LINE_PARSER_STATE_INVALID;
3607 break;
3608 case MAP_LINE_PARSER_STATE_DEVICE:
3609 if (isblank (c)) {
3610 state = MAP_LINE_PARSER_STATE_INODE;
3611 } else if ((c != ':') && ! isxdigit (c)) {
3612 state = MAP_LINE_PARSER_STATE_INVALID;
3614 break;
3615 case MAP_LINE_PARSER_STATE_INODE:
3616 if (isblank (c)) {
3617 state = MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME;
3618 } else if (! isdigit (c)) {
3619 state = MAP_LINE_PARSER_STATE_INVALID;
3621 break;
3622 case MAP_LINE_PARSER_STATE_BLANK_BEFORE_FILENAME:
3623 if ((c == '/') || (c == '[')) {
3624 state = MAP_LINE_PARSER_STATE_FILENAME;
3625 filename [filename_index] = *current;
3626 filename_index ++;
3627 } else if (! isblank (c)) {
3628 state = MAP_LINE_PARSER_STATE_INVALID;
3630 break;
3631 case MAP_LINE_PARSER_STATE_FILENAME:
3632 if (filename_index < (MAPS_FILENAME_SIZE - 1)) {
3633 if (c == '\n') {
3634 state = MAP_LINE_PARSER_STATE_DONE;
3635 done = TRUE;
3636 filename [filename_index] = 0;
3637 } else {
3638 filename [filename_index] = *current;
3639 filename_index ++;
3641 } else {
3642 filename [MAPS_FILENAME_SIZE - 1] = 0;
3643 g_warning ("ELF filename too long: \"%s\"...\n", filename);
3645 break;
3646 case MAP_LINE_PARSER_STATE_DONE:
3647 if (done && is_executable) {
3648 filename [filename_index] = 0;
3649 append_region (regions, (gpointer) start_address, (gpointer) end_address, offset, filename);
3651 return current;
3652 case MAP_LINE_PARSER_STATE_INVALID:
3653 if (c == '\n') {
3654 state = MAP_LINE_PARSER_STATE_DONE;
3656 break;
3659 if (c == 0) {
3660 return NULL;
3661 } else if (c == '\n') {
3662 state = MAP_LINE_PARSER_STATE_DONE;
3665 GOTO_NEXT_CHAR(current, buffer, fd);
3666 c = *current;
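/* Read /proc/self/maps in MAPS_BUFFER_SIZE chunks and feed each line to
 * parse_map_line, recording every executable file-backed mapping in
 * "regions"; returns FALSE only if the maps file cannot be opened. */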
3670 static gboolean
3671 scan_process_regions (ProfilerExecutableMemoryRegions *regions) {
3672 char *buffer;
3673 char *filename;
3674 char *current;
3675 int fd;
3677 fd = open ("/proc/self/maps", O_RDONLY);
3678 if (fd == -1) {
3679 return FALSE;
3682 buffer = malloc (MAPS_BUFFER_SIZE);
3683 filename = malloc (MAPS_FILENAME_SIZE);
3684 update_regions_buffer (fd, buffer);
3685 current = buffer;
3686 while (current != NULL) {
3687 current = parse_map_line (regions, fd, buffer, filename, current);
3690 free (buffer);
3691 free (filename);
3693 close (fd);
3694 return TRUE;
3696 //End of Linux code
3698 typedef enum {
3699 MONO_PROFILER_STATISTICAL_CODE_END = 0,
3700 MONO_PROFILER_STATISTICAL_CODE_METHOD = 1,
3701 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID = 2,
3702 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID = 3,
3703 MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION = 4,
3704 MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN = 5,
3705 MONO_PROFILER_STATISTICAL_CODE_REGIONS = 7
3706 } MonoProfilerStatisticalCode;
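/* Rescan /proc/self/maps, carry over the regions that are still mapped,
 * build symbol tables for the newly seen ones, and emit a REGIONS sub-block:
 * first the ids of invalidated regions, then a description of each new
 * region (id, start address, size, file offset, file name), with each list
 * terminated by a zero. */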
3708 static void
3709 refresh_memory_regions (void) {
3710 ProfilerExecutableMemoryRegions *old_regions = profiler->executable_regions;
3711 ProfilerExecutableMemoryRegions *new_regions = profiler_executable_memory_regions_new (old_regions->next_id, old_regions->next_unmanaged_function_id);
3712 int i;
3714 LOG_WRITER_THREAD ("Refreshing memory regions...");
3715 scan_process_regions (new_regions);
3716 sort_regions (new_regions);
3717 restore_old_regions (old_regions, new_regions);
3718 fix_region_references (new_regions);
3719 LOG_WRITER_THREAD ("Refreshed memory regions.");
3721 LOG_WRITER_THREAD ("Building symbol tables...");
3722 build_symbol_tables (new_regions, & (profiler->executable_files));
3723 #if 0
3724 printf ("Symbol tables done!\n");
3725 printf ("Region summary...\n");
3726 for (i = 0; i < new_regions->regions_count; i++) {
3727 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3728 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3729 region->start, region->end, region->file_offset, region->file_name);
3731 printf ("New symbol tables dump...\n");
3732 for (i = 0; i < new_regions->regions_count; i++) {
3733 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3735 if (region->is_new) {
3736 int symbol_index;
3738 printf ("Region %d[%d][NEW:%d] (%p-%p) at %d in file %s\n", i, region->id, region->is_new,
3739 region->start, region->end, region->file_offset, region->file_name);
3740 for (symbol_index = 0; symbol_index < region->symbols_count; symbol_index ++) {
3741 ProfilerUnmanagedSymbol *symbol = & (region->symbols [symbol_index]);
3742 printf (" [%d] Symbol %s (offset %d, size %d)\n", symbol_index,
3743 executable_region_symbol_get_name (region, symbol),
3744 symbol->offset, symbol->size);
3748 #endif
3749 LOG_WRITER_THREAD ("Built symbol tables.");
3751 // This marks the region "sub-block"
3752 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_REGIONS);
3754 // First write the "removed" regions
3755 for (i = 0; i < old_regions->regions_count; i++) {
3756 ProfilerExecutableMemoryRegionData *region = old_regions->regions [i];
3757 if (! region->is_new) {
3758 #if DEBUG_STATISTICAL_PROFILER
3759 printf ("[refresh_memory_regions] Invalidated region %d\n", region->id);
3760 #endif
3761 write_uint32 (region->id);
3764 write_uint32 (0);
3766 // Then write the new ones
3767 for (i = 0; i < new_regions->regions_count; i++) {
3768 ProfilerExecutableMemoryRegionData *region = new_regions->regions [i];
3769 if (region->is_new) {
3770 region->is_new = FALSE;
3772 #if DEBUG_STATISTICAL_PROFILER
3773 printf ("[refresh_memory_regions] Wrote region %d (%p-%p[%d] '%s')\n", region->id, region->start, region->end, region->file_offset, region->file_name);
3774 #endif
3775 write_uint32 (region->id);
3776 write_uint64 (GPOINTER_TO_UINT (region->start));
3777 write_uint32 (GPOINTER_TO_UINT (region->end) - GPOINTER_TO_UINT (region->start));
3778 write_uint32 (region->file_offset);
3779 write_string (region->file_name);
3782 write_uint32 (0);
3784 // Finally, free the old ones, and replace them
3785 profiler_executable_memory_regions_destroy (old_regions);
3786 profiler->executable_regions = new_regions;
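/* Translate one sampled address into the output format: addresses inside
 * JITted code are written as a method id, addresses inside known ELF regions
 * as an unmanaged symbol id (assigning and describing a new id on first use)
 * or as a raw offset in the region, and anything else as a bare address.
 * The region list is refreshed at most once per data block; the updated
 * "regions_refreshed" flag is returned to the caller. */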
3789 static gboolean
3790 write_statistical_hit (gpointer address, gboolean regions_refreshed) {
3791 ProfilerCodeBuffer *code_buffer = profiler_code_buffer_from_address (profiler, address);
3793 if ((code_buffer != NULL) && (code_buffer->info.type == MONO_PROFILER_CODE_BUFFER_METHOD)) {
3794 MonoMethod *method = code_buffer->info.data.method;
3795 MethodIdMappingElement *element = method_id_mapping_element_get (method);
3797 if (element != NULL) {
3798 #if DEBUG_STATISTICAL_PROFILER
3799 printf ("[write_statistical_hit] Wrote method %d\n", element->id);
3800 #endif
3801 write_uint32 ((element->id << 3) | MONO_PROFILER_STATISTICAL_CODE_METHOD);
3802 } else {
3803 #if DEBUG_STATISTICAL_PROFILER
3804 printf ("[write_statistical_hit] Wrote unknown method %p\n", method);
3805 #endif
3806 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_METHOD);
3808 } else {
3809 ProfilerExecutableMemoryRegionData *region = find_address_region (profiler->executable_regions, address);
3811 if (region == NULL && ! regions_refreshed) {
3812 #if DEBUG_STATISTICAL_PROFILER
3813 printf ("[write_statistical_hit] Cannot find region for address %p, refreshing...\n", address);
3814 #endif
3815 refresh_memory_regions ();
3816 regions_refreshed = TRUE;
3817 region = find_address_region (profiler->executable_regions, address);
3820 if (region != NULL) {
3821 guint32 offset = ((guint8*)address) - ((guint8*)region->start);
3822 ProfilerUnmanagedSymbol *symbol = executable_memory_region_find_symbol (region, offset);
3824 if (symbol != NULL) {
3825 if (symbol->id > 0) {
3826 #if DEBUG_STATISTICAL_PROFILER
3827 printf ("[write_statistical_hit] Wrote unmanaged symbol %d\n", symbol->id);
3828 #endif
3829 write_uint32 ((symbol->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_ID);
3830 } else {
3831 ProfilerExecutableMemoryRegions *regions = profiler->executable_regions;
3832 const char *symbol_name = executable_region_symbol_get_name (region, symbol);
3833 symbol->id = regions->next_unmanaged_function_id;
3834 regions->next_unmanaged_function_id ++;
3835 #if DEBUG_STATISTICAL_PROFILER
3836 printf ("[write_statistical_hit] Wrote new unmanaged symbol in region %d[%d]\n", region->id, offset);
3837 #endif
3838 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_NEW_ID);
3839 write_uint32 (symbol->id);
3840 write_string (symbol_name);
3842 } else {
3843 #if DEBUG_STATISTICAL_PROFILER
3844 printf ("[write_statistical_hit] Wrote unknown unmanaged hit in region %d[%d] (address %p)\n", region->id, offset, address);
3845 #endif
3846 write_uint32 ((region->id << 3) | MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3847 write_uint32 (offset);
3849 } else {
3850 #if DEBUG_STATISTICAL_PROFILER
3851 printf ("[write_statistical_hit] Wrote unknown unmanaged hit %p\n", address);
3852 #endif
3853 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_UNMANAGED_FUNCTION_OFFSET_IN_REGION);
3854 write_uint64 (GPOINTER_TO_UINT (address));
3858 return regions_refreshed;
3861 static void
3862 flush_all_mappings (void);
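/* Drain the statistical buffer: for every hit between first_unwritten_index
 * and next_free_index write the sampled address and, if one was captured,
 * its call chain (preceded by a CALL_CHAIN marker carrying the chain length),
 * then terminate the block with CODE_END and flush it to the output file. */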
3864 static void
3865 write_statistical_data_block (ProfilerStatisticalData *data) {
3866 int start_index = data->first_unwritten_index;
3867 int end_index = data->next_free_index;
3868 gboolean regions_refreshed = FALSE;
3869 int call_chain_depth = profiler->statistical_call_chain_depth;
3870 int index;
3872 if (end_index > data->end_index)
3873 end_index = data->end_index;
3875 if (start_index == end_index)
3876 return;
3878 data->first_unwritten_index = end_index;
3880 write_clock_data ();
3882 #if DEBUG_STATISTICAL_PROFILER
3883 printf ("[write_statistical_data_block] Starting loop at index %d\n", start_index);
3884 #endif
3886 for (index = start_index; index < end_index; index ++) {
3887 int base_index = index * (call_chain_depth + 1);
3888 ProfilerStatisticalHit hit = data->hits [base_index];
3889 int callers_count;
3891 regions_refreshed = write_statistical_hit (hit.address, regions_refreshed);
3892 base_index ++;
3894 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3895 hit = data->hits [base_index + callers_count];
3896 if (hit.address == NULL) {
3897 break;
3901 if (callers_count > 0) {
3902 write_uint32 ((callers_count << 3) | MONO_PROFILER_STATISTICAL_CODE_CALL_CHAIN);
3904 for (callers_count = 0; callers_count < call_chain_depth; callers_count ++) {
3905 hit = data->hits [base_index + callers_count];
3906 if (hit.address != NULL) {
3907 regions_refreshed = write_statistical_hit (hit.address, regions_refreshed);
3908 } else {
3909 break;
3914 write_uint32 (MONO_PROFILER_STATISTICAL_CODE_END);
3916 #if DEBUG_STATISTICAL_PROFILER
3917 printf ("[write_statistical_data_block] Ending loop at index %d\n", end_index);
3918 #endif
3919 write_clock_data ();
3921 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_STATISTICAL);
3924 static void
3925 write_intro_block (void) {
3926 write_uint32 (1);
3927 write_string ("mono");
3928 write_uint32 (profiler->flags);
3929 write_uint64 (profiler->start_counter);
3930 write_uint64 (profiler->start_time);
3931 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_INTRO);
3934 static void
3935 write_end_block (void) {
3936 write_uint32 (1);
3937 write_uint64 (profiler->end_counter);
3938 write_uint64 (profiler->end_time);
3939 write_current_block (MONO_PROFILER_FILE_BLOCK_KIND_END);
3942 static void
3943 update_mapping (ProfilerPerThreadData *data) {
3944 ProfilerEventData *start = data->first_unmapped_event;
3945 ProfilerEventData *end = data->next_free_event;
3946 data->first_unmapped_event = end;
3948 #if (DEBUG_LOGGING_PROFILER)
3949 printf ("[update_mapping][TID %ld] START\n", data->thread_id);
3950 #endif
3951 while (start < end) {
3952 #if DEBUG_LOGGING_PROFILER
3953 printf ("Examining event %p[TID %ld] looking for a new mapping...\n", start, data->thread_id);
3954 #endif
3955 if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
3956 ClassIdMappingElement *element = class_id_mapping_element_get (start->data.address);
3957 if (element == NULL) {
3958 MonoClass *klass = start->data.address;
3959 class_id_mapping_element_new (klass);
3961 } else if (start->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
3962 MethodIdMappingElement *element = method_id_mapping_element_get (start->data.address);
3963 if (element == NULL) {
3964 MonoMethod *method = start->data.address;
3965 if (method != NULL) {
3966 method_id_mapping_element_new (method);
3971 if (start->value == MAX_EVENT_VALUE) {
3972 start ++;
3974 start ++;
3976 #if (DEBUG_LOGGING_PROFILER)
3977 printf ("[update_mapping][TID %ld] END\n", data->thread_id);
3978 #endif
3981 static void
3982 flush_all_mappings (void) {
3983 ProfilerPerThreadData *data;
3985 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3986 update_mapping (data);
3988 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
3989 write_mapping_block (data->thread_id);
3993 static void
3994 flush_full_event_data_buffer (ProfilerPerThreadData *data) {
3995 LOCK_PROFILER ();
3997 // We flush all mappings because some id definitions could come
3998 // from other threads
3999 flush_all_mappings ();
4000 g_assert (data->first_unmapped_event >= data->next_free_event);
4002 write_thread_data_block (data);
4004 data->next_free_event = data->events;
4005 data->next_unreserved_event = data->events;
4006 data->first_unwritten_event = data->events;
4007 data->first_unmapped_event = data->events;
4008 MONO_PROFILER_GET_CURRENT_COUNTER (data->start_event_counter);
4009 data->last_event_counter = data->start_event_counter;
4011 UNLOCK_PROFILER ();
4014 /* The ">=" operator is intentional, to leave one spare slot for "extended values" */
4015 #define RESERVE_EVENTS(d,e,count) do {\
4016 if ((d)->next_unreserved_event >= ((d)->end_event - (count))) {\
4017 flush_full_event_data_buffer (d);\
4019 (e) = (d)->next_unreserved_event;\
4020 (d)->next_unreserved_event += (count);\
4021 } while (0)
4022 #define GET_NEXT_FREE_EVENT(d,e) RESERVE_EVENTS ((d),(e),1)
4023 #define COMMIT_RESERVED_EVENTS(d) do {\
4024 data->next_free_event = data->next_unreserved_event;\
4025 } while (0)
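/* Note that COMMIT_RESERVED_EVENTS ignores its argument and always updates
 * the local variable named "data", so it can only be used in functions where
 * the per-thread data is called "data" (as in all the handlers below). */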
4027 static void
4028 flush_everything (void) {
4029 ProfilerPerThreadData *data;
4031 flush_all_mappings ();
4032 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
4033 write_thread_data_block (data);
4035 write_statistical_data_block (profiler->statistical_data);
4038 #define RESULT_TO_LOAD_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_LOADED_EVENT_SUCCESS:MONO_PROFILER_LOADED_EVENT_FAILURE)
4039 static void
4040 appdomain_start_load (MonoProfiler *profiler, MonoDomain *domain) {
4041 LOCK_PROFILER ();
4042 loaded_element_load_start (profiler->loaded_appdomains, domain);
4043 UNLOCK_PROFILER ();
4046 static void
4047 appdomain_end_load (MonoProfiler *profiler, MonoDomain *domain, int result) {
4048 char *name;
4049 LoadedElement *element;
4051 name = g_strdup_printf ("%d", mono_domain_get_id (domain));
4052 LOCK_PROFILER ();
4053 element = loaded_element_load_end (profiler->loaded_appdomains, domain, name);
4054 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID (), domain);
4055 UNLOCK_PROFILER ();
4058 static void
4059 appdomain_start_unload (MonoProfiler *profiler, MonoDomain *domain) {
4060 LOCK_PROFILER ();
4061 loaded_element_unload_start (profiler->loaded_appdomains, domain);
4062 flush_everything ();
4063 UNLOCK_PROFILER ();
4066 static void
4067 appdomain_end_unload (MonoProfiler *profiler, MonoDomain *domain) {
4068 LoadedElement *element;
4070 LOCK_PROFILER ();
4071 element = loaded_element_unload_end (profiler->loaded_appdomains, domain);
4072 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_APPDOMAIN, CURRENT_THREAD_ID ());
4073 UNLOCK_PROFILER ();
4076 static void
4077 module_start_load (MonoProfiler *profiler, MonoImage *module) {
4078 LOCK_PROFILER ();
4079 loaded_element_load_start (profiler->loaded_modules, module);
4080 UNLOCK_PROFILER ();
4083 static void
4084 module_end_load (MonoProfiler *profiler, MonoImage *module, int result) {
4085 char *name;
4086 MonoAssemblyName aname;
4087 LoadedElement *element;
4089 if (mono_assembly_fill_assembly_name (module, &aname)) {
4090 name = mono_stringify_assembly_name (&aname);
4091 } else {
4092 name = g_strdup_printf ("Dynamic module \"%p\"", module);
4094 LOCK_PROFILER ();
4095 element = loaded_element_load_end (profiler->loaded_modules, module, name);
4096 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_MODULE | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID (), module);
4097 UNLOCK_PROFILER ();
4100 static void
4101 module_start_unload (MonoProfiler *profiler, MonoImage *module) {
4102 LOCK_PROFILER ();
4103 loaded_element_unload_start (profiler->loaded_modules, module);
4104 flush_everything ();
4105 UNLOCK_PROFILER ();
4108 static void
4109 module_end_unload (MonoProfiler *profiler, MonoImage *module) {
4110 LoadedElement *element;
4112 LOCK_PROFILER ();
4113 element = loaded_element_unload_end (profiler->loaded_modules, module);
4114 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_MODULE, CURRENT_THREAD_ID ());
4115 UNLOCK_PROFILER ();
4118 static void
4119 assembly_start_load (MonoProfiler *profiler, MonoAssembly *assembly) {
4120 LOCK_PROFILER ();
4121 loaded_element_load_start (profiler->loaded_assemblies, assembly);
4122 UNLOCK_PROFILER ();
4125 static void
4126 assembly_end_load (MonoProfiler *profiler, MonoAssembly *assembly, int result) {
4127 char *name;
4128 MonoAssemblyName aname;
4129 LoadedElement *element;
4131 if (mono_assembly_fill_assembly_name (mono_assembly_get_image (assembly), &aname)) {
4132 name = mono_stringify_assembly_name (&aname);
4133 } else {
4134 name = g_strdup_printf ("Dynamic assembly \"%p\"", assembly);
4136 LOCK_PROFILER ();
4137 element = loaded_element_load_end (profiler->loaded_assemblies, assembly, name);
4138 write_element_load_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY | RESULT_TO_LOAD_CODE (result), CURRENT_THREAD_ID (), assembly);
4139 UNLOCK_PROFILER ();
4142 static void
4143 assembly_start_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
4144 LOCK_PROFILER ();
4145 loaded_element_unload_start (profiler->loaded_assemblies, assembly);
4146 flush_everything ();
4147 UNLOCK_PROFILER ();
4149 static void
4150 assembly_end_unload (MonoProfiler *profiler, MonoAssembly *assembly) {
4151 LoadedElement *element;
4153 LOCK_PROFILER ();
4154 element = loaded_element_unload_end (profiler->loaded_assemblies, assembly);
4155 write_element_unload_block (element, MONO_PROFILER_LOADED_EVENT_ASSEMBLY, CURRENT_THREAD_ID ());
4156 UNLOCK_PROFILER ();
4159 #if (DEBUG_LOGGING_PROFILER)
4160 static const char*
4161 class_event_code_to_string (MonoProfilerClassEvents code) {
4162 switch (code) {
4163 case MONO_PROFILER_EVENT_CLASS_LOAD: return "LOAD";
4164 case MONO_PROFILER_EVENT_CLASS_UNLOAD: return "UNLOAD";
4165 case MONO_PROFILER_EVENT_CLASS_ALLOCATION: return "ALLOCATION";
4166 case MONO_PROFILER_EVENT_CLASS_EXCEPTION: return "EXCEPTION";
4167 default: g_assert_not_reached (); return "";
4170 static const char*
4171 method_event_code_to_string (MonoProfilerMethodEvents code) {
4172 switch (code) {
4173 case MONO_PROFILER_EVENT_METHOD_CALL: return "CALL";
4174 case MONO_PROFILER_EVENT_METHOD_JIT: return "JIT";
4175 case MONO_PROFILER_EVENT_METHOD_FREED: return "FREED";
4176 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER: return "ALLOCATION_CALLER";
4177 case MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER: return "ALLOCATION_JIT_TIME_CALLER";
4178 case MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID: return "ALLOCATION_OBJECT_ID";
4179 default: g_assert_not_reached (); return "";
4182 static const char*
4183 number_event_code_to_string (MonoProfilerEvents code) {
4184 switch (code) {
4185 case MONO_PROFILER_EVENT_THREAD: return "THREAD";
4186 case MONO_PROFILER_EVENT_GC_COLLECTION: return "GC_COLLECTION";
4187 case MONO_PROFILER_EVENT_GC_MARK: return "GC_MARK";
4188 case MONO_PROFILER_EVENT_GC_SWEEP: return "GC_SWEEP";
4189 case MONO_PROFILER_EVENT_GC_RESIZE: return "GC_RESIZE";
4190 case MONO_PROFILER_EVENT_GC_STOP_WORLD: return "GC_STOP_WORLD";
4191 case MONO_PROFILER_EVENT_GC_START_WORLD: return "GC_START_WORLD";
4192 case MONO_PROFILER_EVENT_JIT_TIME_ALLOCATION: return "JIT_TIME_ALLOCATION";
4193 case MONO_PROFILER_EVENT_STACK_SECTION: return "STACK_SECTION";
4194 case MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID: return "ALLOCATION_OBJECT_ID";
4195 default: g_assert_not_reached (); return "";
4198 static const char*
4199 event_result_to_string (MonoProfilerEventResult code) {
4200 switch (code) {
4201 case MONO_PROFILER_EVENT_RESULT_SUCCESS: return "SUCCESS";
4202 case MONO_PROFILER_EVENT_RESULT_FAILURE: return "FAILURE";
4203 default: g_assert_not_reached (); return "";
4206 static const char*
4207 event_kind_to_string (MonoProfilerEventKind code) {
4208 switch (code) {
4209 case MONO_PROFILER_EVENT_KIND_START: return "START";
4210 case MONO_PROFILER_EVENT_KIND_END: return "END";
4211 default: g_assert_not_reached (); return "";
4214 static void
4215 print_event_data (ProfilerPerThreadData *data, ProfilerEventData *event, guint64 value) {
4216 if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_CLASS) {
4217 printf ("STORE EVENT [TID %ld][EVENT %ld] CLASS[%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s)\n",
4218 data->thread_id,
4219 event - data->events,
4220 event->data.address,
4221 class_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
4222 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
4223 event_kind_to_string (event->kind),
4224 event->data_type,
4225 event->kind,
4226 event->code,
4227 value,
4228 mono_class_get_namespace ((MonoClass*) event->data.address),
4229 mono_class_get_name ((MonoClass*) event->data.address));
4230 } else if (event->data_type == MONO_PROFILER_EVENT_DATA_TYPE_METHOD) {
4231 printf ("STORE EVENT [TID %ld][EVENT %ld] METHOD[%p] %s:%s:%s[%d-%d-%d] %ld (%s.%s:%s (?))\n",
4232 data->thread_id,
4233 event - data->events,
4234 event->data.address,
4235 method_event_code_to_string (event->code & ~MONO_PROFILER_EVENT_RESULT_MASK),
4236 event_result_to_string (event->code & MONO_PROFILER_EVENT_RESULT_MASK),
4237 event_kind_to_string (event->kind),
4238 event->data_type,
4239 event->kind,
4240 event->code,
4241 value,
4242 (event->data.address != NULL) ? mono_class_get_namespace (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
4243 (event->data.address != NULL) ? mono_class_get_name (mono_method_get_class ((MonoMethod*) event->data.address)) : "<NULL>",
4244 (event->data.address != NULL) ? mono_method_get_name ((MonoMethod*) event->data.address) : "<NULL>");
4245 } else {
4246 printf ("STORE EVENT [TID %ld][EVENT %ld] NUMBER[%ld] %s:%s[%d-%d-%d] %ld\n",
4247 data->thread_id,
4248 event - data->events,
4249 (guint64) event->data.number,
4250 number_event_code_to_string (event->code),
4251 event_kind_to_string (event->kind),
4252 event->data_type,
4253 event->kind,
4254 event->code,
4255 value);
4258 #define LOG_EVENT(data,ev,val) print_event_data ((data),(ev),(val))
4259 #else
4260 #define LOG_EVENT(data,ev,val)
4261 #endif
4263 #define RESULT_TO_EVENT_CODE(r) (((r)==MONO_PROFILE_OK)?MONO_PROFILER_EVENT_RESULT_SUCCESS:MONO_PROFILER_EVENT_RESULT_FAILURE)
4265 #define STORE_EVENT_ITEM_COUNTER(event,p,i,dt,c,k) do {\
4266 guint64 counter;\
4267 guint64 delta;\
4268 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
4269 (event)->data.address = (i);\
4270 (event)->data_type = (dt);\
4271 (event)->code = (c);\
4272 (event)->kind = (k);\
4273 delta = counter - data->last_event_counter;\
4274 if (delta < MAX_EVENT_VALUE) {\
4275 (event)->value = delta;\
4276 } else {\
4277 ProfilerEventData *extension = data->next_unreserved_event;\
4278 data->next_unreserved_event ++;\
4279 (event)->value = MAX_EVENT_VALUE;\
4280 *(guint64*)extension = delta;\
4282 data->last_event_counter = counter;\
4283 LOG_EVENT (data, (event), delta);\
4284 } while (0);
4285 #define STORE_EVENT_ITEM_VALUE(event,p,i,dt,c,k,v) do {\
4286 (event)->data.address = (i);\
4287 (event)->data_type = (dt);\
4288 (event)->code = (c);\
4289 (event)->kind = (k);\
4290 if ((v) < MAX_EVENT_VALUE) {\
4291 (event)->value = (v);\
4292 } else {\
4293 ProfilerEventData *extension = data->next_unreserved_event;\
4294 data->next_unreserved_event ++;\
4295 (event)->value = MAX_EVENT_VALUE;\
4296 *(guint64*)extension = (v);\
4298 LOG_EVENT (data, (event), (v));\
4299 }while (0);
4300 #define STORE_EVENT_NUMBER_COUNTER(event,p,n,dt,c,k) do {\
4301 guint64 counter;\
4302 guint64 delta;\
4303 MONO_PROFILER_GET_CURRENT_COUNTER (counter);\
4304 (event)->data.number = (n);\
4305 (event)->data_type = (dt);\
4306 (event)->code = (c);\
4307 (event)->kind = (k);\
4308 delta = counter - data->last_event_counter;\
4309 if (delta < MAX_EVENT_VALUE) {\
4310 (event)->value = delta;\
4311 } else {\
4312 ProfilerEventData *extension = data->next_unreserved_event;\
4313 data->next_unreserved_event ++;\
4314 (event)->value = MAX_EVENT_VALUE;\
4315 *(guint64*)extension = delta;\
4317 data->last_event_counter = counter;\
4318 LOG_EVENT (data, (event), delta);\
4319 }while (0);
4320 #define STORE_EVENT_NUMBER_VALUE(event,p,n,dt,c,k,v) do {\
4321 (event)->data.number = (n);\
4322 (event)->data_type = (dt);\
4323 (event)->code = (c);\
4324 (event)->kind = (k);\
4325 if ((v) < MAX_EVENT_VALUE) {\
4326 (event)->value = (v);\
4327 } else {\
4328 ProfilerEventData *extension = data->next_unreserved_event;\
4329 data->next_unreserved_event ++;\
4330 (event)->value = MAX_EVENT_VALUE;\
4331 *(guint64*)extension = (v);\
4333 LOG_EVENT (data, (event), (v));\
4334 }while (0);
4335 #define INCREMENT_EVENT(event) do {\
4336 if ((event)->value != MAX_EVENT_VALUE) {\
4337 (event) ++;\
4338 } else {\
4339 (event) += 2;\
4341 }while (0);
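/* The STORE_EVENT_* macros fill one ProfilerEventData slot with either a
 * timestamp delta (the *_COUNTER variants) or an explicit value (the *_VALUE
 * variants); values that do not fit in the 25-bit "value" field are spilled
 * as a raw guint64 into the next reserved slot, with value set to
 * MAX_EVENT_VALUE as a marker, and INCREMENT_EVENT skips such extension
 * slots. A typical event handler below therefore follows the pattern:
 *   GET_PROFILER_THREAD_DATA (data);
 *   GET_NEXT_FREE_EVENT (data, event);
 *   STORE_EVENT_ITEM_COUNTER (event, profiler, item, data_type, code, kind);
 *   COMMIT_RESERVED_EVENTS (data);
 */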
4343 static void
4344 class_start_load (MonoProfiler *profiler, MonoClass *klass) {
4345 ProfilerPerThreadData *data;
4346 ProfilerEventData *event;
4347 GET_PROFILER_THREAD_DATA (data);
4348 GET_NEXT_FREE_EVENT (data, event);
4349 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD, MONO_PROFILER_EVENT_KIND_START);
4350 COMMIT_RESERVED_EVENTS (data);
4352 static void
4353 class_end_load (MonoProfiler *profiler, MonoClass *klass, int result) {
4354 ProfilerPerThreadData *data;
4355 ProfilerEventData *event;
4356 GET_PROFILER_THREAD_DATA (data);
4357 GET_NEXT_FREE_EVENT (data, event);
4358 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_LOAD | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
4359 COMMIT_RESERVED_EVENTS (data);
4361 static void
4362 class_start_unload (MonoProfiler *profiler, MonoClass *klass) {
4363 ProfilerPerThreadData *data;
4364 ProfilerEventData *event;
4365 GET_PROFILER_THREAD_DATA (data);
4366 GET_NEXT_FREE_EVENT (data, event);
4367 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_START);
4368 COMMIT_RESERVED_EVENTS (data);
4370 static void
4371 class_end_unload (MonoProfiler *profiler, MonoClass *klass) {
4372 ProfilerPerThreadData *data;
4373 ProfilerEventData *event;
4374 GET_PROFILER_THREAD_DATA (data);
4375 GET_NEXT_FREE_EVENT (data, event);
4376 STORE_EVENT_ITEM_COUNTER (event, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_UNLOAD, MONO_PROFILER_EVENT_KIND_END);
4377 COMMIT_RESERVED_EVENTS (data);
4380 static void
4381 method_start_jit (MonoProfiler *profiler, MonoMethod *method) {
4382 ProfilerPerThreadData *data;
4383 ProfilerEventData *event;
4384 GET_PROFILER_THREAD_DATA (data);
4385 GET_NEXT_FREE_EVENT (data, event);
4386 thread_stack_push_jitted_safely (&(data->stack), method, TRUE);
4387 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT, MONO_PROFILER_EVENT_KIND_START);
4388 COMMIT_RESERVED_EVENTS (data);
4390 static void
4391 method_end_jit (MonoProfiler *profiler, MonoMethod *method, int result) {
4392 ProfilerPerThreadData *data;
4393 ProfilerEventData *event;
4394 GET_PROFILER_THREAD_DATA (data);
4395 GET_NEXT_FREE_EVENT (data, event);
4396 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_JIT | RESULT_TO_EVENT_CODE (result), MONO_PROFILER_EVENT_KIND_END);
4397 thread_stack_pop (&(data->stack));
4398 COMMIT_RESERVED_EVENTS (data);
4401 #if (HAS_OPROFILE)
4402 static void
4403 method_jit_result (MonoProfiler *prof, MonoMethod *method, MonoJitInfo* jinfo, int result) {
4404 if (profiler->action_flags.oprofile && (result == MONO_PROFILE_OK)) {
4405 MonoClass *klass = mono_method_get_class (method);
4406 char *signature = mono_signature_get_desc (mono_method_signature (method), TRUE);
4407 char *name = g_strdup_printf ("%s.%s:%s (%s)", mono_class_get_namespace (klass), mono_class_get_name (klass), mono_method_get_name (method), signature);
4408 gpointer code_start = mono_jit_info_get_code_start (jinfo);
4409 int code_size = mono_jit_info_get_code_size (jinfo);
4411 if (op_write_native_code (name, code_start, code_size)) {
4412 g_warning ("Problem calling op_write_native_code\n");
4415 g_free (signature);
4416 g_free (name);
4419 #endif
4422 static void
4423 method_enter (MonoProfiler *profiler, MonoMethod *method) {
4424 ProfilerPerThreadData *data;
4426 CHECK_PROFILER_ENABLED ();
4427 GET_PROFILER_THREAD_DATA (data);
4428 if (profiler->action_flags.track_calls) {
4429 ProfilerEventData *event;
4430 GET_NEXT_FREE_EVENT (data, event);
4431 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_START);
4432 COMMIT_RESERVED_EVENTS (data);
4434 if (profiler->action_flags.track_stack) {
4435 thread_stack_push_safely (&(data->stack), method);
4438 static void
4439 method_leave (MonoProfiler *profiler, MonoMethod *method) {
4440 ProfilerPerThreadData *data;
4442 CHECK_PROFILER_ENABLED ();
4443 GET_PROFILER_THREAD_DATA (data);
4444 if (profiler->action_flags.track_calls) {
4445 ProfilerEventData *event;
4446 GET_NEXT_FREE_EVENT (data, event);
4447 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_CALL, MONO_PROFILER_EVENT_KIND_END);
4448 COMMIT_RESERVED_EVENTS (data);
4450 if (profiler->action_flags.track_stack) {
4451 thread_stack_pop (&(data->stack));
4455 static void
4456 method_free (MonoProfiler *profiler, MonoMethod *method) {
4457 ProfilerPerThreadData *data;
4458 ProfilerEventData *event;
4459 GET_PROFILER_THREAD_DATA (data);
4460 GET_NEXT_FREE_EVENT (data, event);
4461 STORE_EVENT_ITEM_COUNTER (event, profiler, method, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_FREED, 0);
4462 COMMIT_RESERVED_EVENTS (data);
4465 static void
4466 thread_start (MonoProfiler *profiler, intptr_t tid) {
4467 ProfilerPerThreadData *data;
4468 ProfilerEventData *event;
4469 GET_PROFILER_THREAD_DATA (data);
4470 GET_NEXT_FREE_EVENT (data, event);
4471 STORE_EVENT_NUMBER_COUNTER (event, profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_START);
4472 COMMIT_RESERVED_EVENTS (data);
4474 static void
4475 thread_end (MonoProfiler *profiler, intptr_t tid) {
4476 ProfilerPerThreadData *data;
4477 ProfilerEventData *event;
4478 GET_PROFILER_THREAD_DATA (data);
4479 GET_NEXT_FREE_EVENT (data, event);
4480 STORE_EVENT_NUMBER_COUNTER (event, profiler, tid, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_THREAD, MONO_PROFILER_EVENT_KIND_END);
4481 COMMIT_RESERVED_EVENTS (data);
4484 static ProfilerEventData*
4485 save_stack_delta (MonoProfiler *profiler, ProfilerPerThreadData *data, ProfilerEventData *events, int unsaved_frames) {
4486 int i;
4488 /* In this loop it is safe to simply increment "events" because MAX_EVENT_VALUE cannot be reached. */
4489 STORE_EVENT_NUMBER_VALUE (events, profiler, data->stack.last_saved_top, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_STACK_SECTION, 0, unsaved_frames);
4490 events++;
4491 for (i = 0; i < unsaved_frames; i++) {
4492 if (! thread_stack_index_from_top_is_jitted (&(data->stack), i)) {
4493 STORE_EVENT_ITEM_VALUE (events, profiler, thread_stack_index_from_top (&(data->stack), i), MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER, 0, 0);
4494 } else {
4495 STORE_EVENT_ITEM_VALUE (events, profiler, thread_stack_index_from_top (&(data->stack), i), MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER, 0, 0);
4497 events ++;
4500 data->stack.last_saved_top = data->stack.top;
4502 return events;
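/* Allocation hook: reserve all needed event slots up front (the allocation
 * event plus, depending on the action flags, the caller, the object id and
 * any unsaved stack frames), then store the stack delta, the CLASS_ALLOCATION
 * event carrying the object size, and the optional caller / object-id events. */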
4505 static void
4506 object_allocated (MonoProfiler *profiler, MonoObject *obj, MonoClass *klass) {
4507 ProfilerPerThreadData *data;
4508 ProfilerEventData *events;
4509 int unsaved_frames;
4510 int event_slot_count;
4512 GET_PROFILER_THREAD_DATA (data);
4513 event_slot_count = 1;
4514 if (profiler->action_flags.save_allocation_caller) {
4515 event_slot_count ++;
4517 if (profiler->action_flags.allocations_carry_id) {
4518 event_slot_count ++;
4520 if (profiler->action_flags.save_allocation_stack) {
4521 unsaved_frames = thread_stack_count_unsaved_frames (&(data->stack));
4522 event_slot_count += (unsaved_frames + 1);
4523 } else {
4524 unsaved_frames = 0;
4526 RESERVE_EVENTS (data, events, event_slot_count);
4528 if (profiler->action_flags.save_allocation_stack) {
4529 events = save_stack_delta (profiler, data, events, unsaved_frames);
4532 STORE_EVENT_ITEM_VALUE (events, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_ALLOCATION, 0, (guint64) mono_object_get_size (obj));
4533 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
4534 STORE_ALLOCATED_OBJECT (data, obj);
4537 if (profiler->action_flags.save_allocation_caller) {
4538 MonoMethod *caller = thread_stack_top (&(data->stack));
4539 gboolean caller_is_jitted = thread_stack_top_is_jitted (&(data->stack));
4540 int index = 1;
4541 /* In this loop it is safe to simply increment "events" because MAX_EVENT_VALUE cannot be reached. */
4542 events ++;
4544 while ((caller != NULL) && (caller->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)) {
4545 caller = thread_stack_index_from_top (&(data->stack), index);
4546 caller_is_jitted = thread_stack_index_from_top_is_jitted (&(data->stack), index);
4547 index ++;
4549 if (! caller_is_jitted) {
4550 STORE_EVENT_ITEM_VALUE (events, profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_CALLER, 0, 0);
4551 } else {
4552 STORE_EVENT_ITEM_VALUE (events, profiler, caller, MONO_PROFILER_EVENT_DATA_TYPE_METHOD, MONO_PROFILER_EVENT_METHOD_ALLOCATION_JIT_TIME_CALLER, 0, 0);
4555 if (profiler->action_flags.allocations_carry_id) {
4556 events ++;
4557 STORE_EVENT_ITEM_VALUE (events, profiler, obj, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_ALLOCATION_OBJECT_ID, 0, 0);
4560 COMMIT_RESERVED_EVENTS (data);
4563 static void
4564 monitor_event (MonoProfiler *profiler, MonoObject *obj, MonoProfilerMonitorEvent event) {
4565 ProfilerPerThreadData *data;
4566 ProfilerEventData *events;
4567 MonoClass *klass;
4568 int unsaved_frames;
4569 int event_slot_count;
4571 CHECK_PROFILER_ENABLED ();
4573 GET_PROFILER_THREAD_DATA (data);
4574 klass = mono_object_get_class (obj);
4576 unsaved_frames = thread_stack_count_unsaved_frames (&(data->stack));
4577 if (unsaved_frames > 0) {
4578 event_slot_count = unsaved_frames + 3;
4579 } else {
4580 event_slot_count = 2;
4583 RESERVE_EVENTS (data, events, event_slot_count);
4584 if (unsaved_frames > 0) {
4585 events = save_stack_delta (profiler, data, events, unsaved_frames);
4587 STORE_EVENT_ITEM_COUNTER (events, profiler, klass, MONO_PROFILER_EVENT_DATA_TYPE_CLASS, MONO_PROFILER_EVENT_CLASS_MONITOR, MONO_PROFILER_EVENT_KIND_START);
4588 INCREMENT_EVENT (events);
4589 STORE_EVENT_ITEM_VALUE (events, profiler, obj, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_OBJECT_MONITOR, 0, event);
4590 COMMIT_RESERVED_EVENTS (data);
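/* Sampling handlers (statistical profiler). Each hit claims a slot in the
 * current buffer with an atomic increment; the thread that claims the first
 * slot past the end swaps in the spare buffer and wakes the writer thread,
 * while other racing threads either retry or drop their sample. */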
4593 static void
4594 statistical_call_chain (MonoProfiler *profiler, int call_chain_depth, guchar **ips, void *context) {
4595 MonoDomain *domain = mono_domain_get ();
4596 ProfilerStatisticalData *data;
4597 unsigned int index;
4599 CHECK_PROFILER_ENABLED ();
4600 do {
4601 data = profiler->statistical_data;
4602 index = InterlockedIncrement ((int*) &data->next_free_index);
4604 if (index <= data->end_index) {
4605 unsigned int base_index = (index - 1) * (profiler->statistical_call_chain_depth + 1);
4606 unsigned int call_chain_index = 0;
4608 //printf ("[statistical_call_chain] (%d)\n", call_chain_depth);
4609 while (call_chain_index < call_chain_depth) {
4610 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
4611 //printf ("[statistical_call_chain] [%d] = %p\n", base_index + call_chain_index, ips [call_chain_index]);
4612 hit->address = (gpointer) ips [call_chain_index];
4613 hit->domain = domain;
4614 call_chain_index ++;
4616 while (call_chain_index <= profiler->statistical_call_chain_depth) {
4617 ProfilerStatisticalHit *hit = & (data->hits [base_index + call_chain_index]);
4618 //printf ("[statistical_call_chain] [%d] = NULL\n", base_index + call_chain_index);
4619 hit->address = NULL;
4620 hit->domain = NULL;
4621 call_chain_index ++;
4623 } else {
4624 /* Check if we are the one that must swap the buffers */
4625 if (index == data->end_index + 1) {
4626 ProfilerStatisticalData *new_data;
4628 /* In the *impossible* case that the writer thread has not finished yet, */
4629 /* loop waiting for it and meanwhile lose all statistical events... */
4630 do {
4631 /* First, wait until it has consumed the ready buffer */
4632 while (profiler->statistical_data_ready != NULL);
4633 /* Then, wait until it has produced the free buffer */
4634 new_data = profiler->statistical_data_second_buffer;
4635 } while (new_data == NULL);
4637 profiler->statistical_data_ready = data;
4638 profiler->statistical_data = new_data;
4639 profiler->statistical_data_second_buffer = NULL;
4640 WRITER_EVENT_RAISE ();
4641 /* Otherwise exit from the handler and drop the event... */
4642 } else {
4643 break;
4646 /* Loop again, hoping to acquire a free slot this time (otherwise the event will be dropped) */
4647 data = NULL;
4649 } while (data == NULL);
4652 static void
4653 statistical_hit (MonoProfiler *profiler, guchar *ip, void *context) {
4654 MonoDomain *domain = mono_domain_get ();
4655 ProfilerStatisticalData *data;
4656 unsigned int index;
4658 CHECK_PROFILER_ENABLED ();
4659 do {
4660 data = profiler->statistical_data;
4661 index = InterlockedIncrement ((int*) &data->next_free_index);
4663 if (index <= data->end_index) {
4664 ProfilerStatisticalHit *hit = & (data->hits [index - 1]);
4665 hit->address = (gpointer) ip;
4666 hit->domain = domain;
4667 } else {
4668 /* Check if we are the one that must swap the buffers */
4669 if (index == data->end_index + 1) {
4670 ProfilerStatisticalData *new_data;
4672 /* In the *impossible* case that the writer thread has not finished yet, */
4673 /* loop waiting for it and meanwhile lose all statistical events... */
4674 do {
4675 /* First, wait until it has consumed the ready buffer */
4676 while (profiler->statistical_data_ready != NULL);
4677 /* Then, wait until it has produced the free buffer */
4678 new_data = profiler->statistical_data_second_buffer;
4679 } while (new_data == NULL);
4681 profiler->statistical_data_ready = data;
4682 profiler->statistical_data = new_data;
4683 profiler->statistical_data_second_buffer = NULL;
4684 WRITER_EVENT_RAISE ();
4687 /* Loop again, hoping to acquire a free slot this time */
4688 data = NULL;
4690 } while (data == NULL);
4693 static MonoProfilerEvents
4694 gc_event_code_from_profiler_event (MonoGCEvent event) {
4695 switch (event) {
4696 case MONO_GC_EVENT_START:
4697 case MONO_GC_EVENT_END:
4698 return MONO_PROFILER_EVENT_GC_COLLECTION;
4699 case MONO_GC_EVENT_MARK_START:
4700 case MONO_GC_EVENT_MARK_END:
4701 return MONO_PROFILER_EVENT_GC_MARK;
4702 case MONO_GC_EVENT_RECLAIM_START:
4703 case MONO_GC_EVENT_RECLAIM_END:
4704 return MONO_PROFILER_EVENT_GC_SWEEP;
4705 case MONO_GC_EVENT_PRE_STOP_WORLD:
4706 case MONO_GC_EVENT_POST_STOP_WORLD:
4707 return MONO_PROFILER_EVENT_GC_STOP_WORLD;
4708 case MONO_GC_EVENT_PRE_START_WORLD:
4709 case MONO_GC_EVENT_POST_START_WORLD:
4710 return MONO_PROFILER_EVENT_GC_START_WORLD;
4711 default:
4712 g_assert_not_reached ();
4713 return 0;
4717 static MonoProfilerEventKind
4718 gc_event_kind_from_profiler_event (MonoGCEvent event) {
4719 switch (event) {
4720 case MONO_GC_EVENT_START:
4721 case MONO_GC_EVENT_MARK_START:
4722 case MONO_GC_EVENT_RECLAIM_START:
4723 case MONO_GC_EVENT_PRE_STOP_WORLD:
4724 case MONO_GC_EVENT_PRE_START_WORLD:
4725 return MONO_PROFILER_EVENT_KIND_START;
4726 case MONO_GC_EVENT_END:
4727 case MONO_GC_EVENT_MARK_END:
4728 case MONO_GC_EVENT_RECLAIM_END:
4729 case MONO_GC_EVENT_POST_START_WORLD:
4730 case MONO_GC_EVENT_POST_STOP_WORLD:
4731 return MONO_PROFILER_EVENT_KIND_END;
4732 default:
4733 g_assert_not_reached ();
4734 return 0;
4738 static gboolean
4739 dump_current_heap_snapshot (void) {
4740 gboolean result;
4742 if (profiler->heap_shot_was_requested) {
4743 result = TRUE;
4744 } else {
4745 if (profiler->dump_next_heap_snapshots > 0) {
4746 profiler->dump_next_heap_snapshots--;
4747 result = TRUE;
4748 } else if (profiler->dump_next_heap_snapshots < 0) {
4749 result = TRUE;
4750 } else {
4751 result = FALSE;
4755 return result;
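/* The profiler keeps a "shadow heap": a chain of fixed-size buffers holding
 * pointers to the tracked objects. The helpers below create, reset and free
 * that chain. */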
4758 static void
4759 profiler_heap_buffers_setup (ProfilerHeapShotHeapBuffers *heap) {
4760 heap->buffers = g_new (ProfilerHeapShotHeapBuffer, 1);
4761 heap->buffers->previous = NULL;
4762 heap->buffers->next = NULL;
4763 heap->buffers->start_slot = &(heap->buffers->buffer [0]);
4764 heap->buffers->end_slot = &(heap->buffers->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4765 heap->last = heap->buffers;
4766 heap->current = heap->buffers;
4767 heap->first_free_slot = & (heap->buffers->buffer [0]);
4769 static void
4770 profiler_heap_buffers_clear (ProfilerHeapShotHeapBuffers *heap) {
4771 heap->buffers = NULL;
4772 heap->last = NULL;
4773 heap->current = NULL;
4774 heap->first_free_slot = NULL;
4776 static void
4777 profiler_heap_buffers_free (ProfilerHeapShotHeapBuffers *heap) {
4778 ProfilerHeapShotHeapBuffer *current = heap->buffers;
4779 while (current != NULL) {
4780 ProfilerHeapShotHeapBuffer *next = current->next;
4781 g_free (current);
4782 current = next;
4784 profiler_heap_buffers_clear (heap);
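/* Walk the reference slots of an object (or array element) using the class
 * layout bitmap (a packed guint64 for small layouts, an extended byte array
 * otherwise), write every live referenced object to the heap-shot job and
 * return the number of references written. */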
4787 static int
4788 report_object_references (gpointer *start, ClassIdMappingElement *layout, ProfilerHeapShotWriteJob *job) {
4789 int reported_references = 0;
4790 int slot;
4792 for (slot = 0; slot < layout->data.layout.slots; slot ++) {
4793 gboolean slot_has_reference;
4794 if (layout->data.layout.slots <= CLASS_LAYOUT_PACKED_BITMAP_SIZE) {
4795 if (layout->data.bitmap.compact & (((guint64)1) << slot)) {
4796 slot_has_reference = TRUE;
4797 } else {
4798 slot_has_reference = FALSE;
4800 } else {
4801 if (layout->data.bitmap.extended [slot >> 3] & (1 << (slot & 7))) {
4802 slot_has_reference = TRUE;
4803 } else {
4804 slot_has_reference = FALSE;
4808 if (slot_has_reference) {
4809 gpointer field = start [slot];
4811 if ((field != NULL) && mono_object_is_alive (field)) {
4812 reported_references ++;
4813 WRITE_HEAP_SHOT_JOB_VALUE (job, field);
4818 return reported_references;
4821 static void
4822 profiler_heap_report_object_reachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4823 if (job != NULL) {
4824 MonoClass *klass = mono_object_get_class (obj);
4825 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
4826 if (class_id == NULL) {
4827 printf ("profiler_heap_report_object_reachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4829 g_assert (class_id != NULL);
4831 if (job->summary.capacity > 0) {
4832 guint32 id = class_id->id;
4833 g_assert (id < job->summary.capacity);
4835 job->summary.per_class_data [id].reachable.instances ++;
4836 job->summary.per_class_data [id].reachable.bytes += mono_object_get_size (obj);
4838 if (profiler->action_flags.heap_shot && job->dump_heap_data) {
4839 int reference_counter = 0;
4840 gpointer *reference_counter_location;
4842 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, obj, HEAP_CODE_OBJECT);
4843 #if DEBUG_HEAP_PROFILER
4844 printf ("profiler_heap_report_object_reachable: reported object %p at cursor %p\n", obj, (job->cursor - 1));
4845 #endif
4846 WRITE_HEAP_SHOT_JOB_VALUE (job, NULL);
4847 reference_counter_location = job->cursor - 1;
4849 if (mono_class_get_rank (klass)) {
4850 MonoArray *array = (MonoArray *) obj;
4851 MonoClass *element_class = mono_class_get_element_class (klass);
4852 ClassIdMappingElement *element_id = class_id_mapping_element_get (element_class);
4854 g_assert (element_id != NULL);
4855 if (element_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4856 class_id_mapping_element_build_layout_bitmap (element_class, element_id);
4858 if (! mono_class_is_valuetype (element_class)) {
4859 int length = mono_array_length (array);
4860 int i;
4861 for (i = 0; i < length; i++) {
4862 MonoObject *array_element = mono_array_get (array, MonoObject*, i);
4863 if ((array_element != NULL) && mono_object_is_alive (array_element)) {
4864 reference_counter ++;
4865 WRITE_HEAP_SHOT_JOB_VALUE (job, array_element);
4868 } else if (element_id->data.layout.references > 0) {
4869 int length = mono_array_length (array);
4870 int array_element_size = mono_array_element_size (klass);
4871 int i;
4872 for (i = 0; i < length; i++) {
4873 gpointer array_element_address = mono_array_addr_with_size (array, array_element_size, i);
4874 reference_counter += report_object_references (array_element_address, element_id, job);
4877 } else {
4878 if (class_id->data.layout.slots == CLASS_LAYOUT_NOT_INITIALIZED) {
4879 class_id_mapping_element_build_layout_bitmap (klass, class_id);
4881 if (class_id->data.layout.references > 0) {
4882 reference_counter += report_object_references ((gpointer)(((char*)obj) + sizeof (MonoObject)), class_id, job);
4886 *reference_counter_location = GINT_TO_POINTER (reference_counter);
4887 #if DEBUG_HEAP_PROFILER
4888 printf ("profiler_heap_report_object_reachable: updated reference_counter_location %p with value %d\n", reference_counter_location, reference_counter);
4889 #endif
4893 static void
4894 profiler_heap_report_object_unreachable (ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4895 if (job != NULL) {
4896 MonoClass *klass = mono_object_get_class (obj);
4897 guint32 size = mono_object_get_size (obj);
4899 if (job->summary.capacity > 0) {
4900 ClassIdMappingElement *class_id = class_id_mapping_element_get (klass);
4901 guint32 id;
4903 if (class_id == NULL) {
4904 printf ("profiler_heap_report_object_unreachable: class %p (%s.%s) has no id\n", klass, mono_class_get_namespace (klass), mono_class_get_name (klass));
4906 g_assert (class_id != NULL);
4907 id = class_id->id;
4908 g_assert (id < job->summary.capacity);
4910 job->summary.per_class_data [id].unreachable.instances ++;
4911 job->summary.per_class_data [id].unreachable.bytes += size;
4913 if (profiler->action_flags.unreachable_objects && job->dump_heap_data) {
4914 #if DEBUG_HEAP_PROFILER
4915 printf ("profiler_heap_report_object_unreachable: at job %p writing klass %p\n", job, klass);
4916 #endif
4917 WRITE_HEAP_SHOT_JOB_VALUE_WITH_CODE (job, klass, HEAP_CODE_FREE_OBJECT_CLASS);
4919 #if DEBUG_HEAP_PROFILER
4920 printf ("profiler_heap_report_object_unreachable: at job %p writing size %p\n", job, GUINT_TO_POINTER (size));
4921 #endif
4922 WRITE_HEAP_SHOT_JOB_VALUE (job, GUINT_TO_POINTER (size));
4927 static void
4928 profiler_heap_add_object (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject *obj) {
4929 if (heap->first_free_slot >= heap->current->end_slot) {
4930 if (heap->current->next != NULL) {
4931 heap->current = heap->current->next;
4932 } else {
4933 ProfilerHeapShotHeapBuffer *buffer = g_new (ProfilerHeapShotHeapBuffer, 1);
4934 buffer->previous = heap->last;
4935 buffer->next = NULL;
4936 buffer->start_slot = &(buffer->buffer [0]);
4937 buffer->end_slot = &(buffer->buffer [PROFILER_HEAP_SHOT_HEAP_BUFFER_SIZE]);
4938 heap->current = buffer;
4939 heap->last->next = buffer;
4940 heap->last = buffer;
4942 heap->first_free_slot = &(heap->current->buffer [0]);
4945 *(heap->first_free_slot) = obj;
4946 heap->first_free_slot ++;
4947 profiler_heap_report_object_reachable (job, obj);
4950 static MonoObject*
4951 profiler_heap_pop_object_from_end (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job, MonoObject** current_slot) {
4952 while (heap->first_free_slot != current_slot) {
4953 MonoObject* obj;
4955 if (heap->first_free_slot > heap->current->start_slot) {
4956 heap->first_free_slot --;
4957 } else {
4958 heap->current = heap->current->previous;
4959 g_assert (heap->current != NULL);
4960 heap->first_free_slot = heap->current->end_slot - 1;
4963 obj = *(heap->first_free_slot);
4965 if (mono_object_is_alive (obj)) {
4966 profiler_heap_report_object_reachable (job, obj);
4967 return obj;
4968 } else {
4969 profiler_heap_report_object_unreachable (job, obj);
4972 return NULL;
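/* Sweep the shadow heap after a collection: report surviving objects as
 * reachable and dead ones as unreachable, compacting the buffers by pulling
 * live objects from the end into the freed slots. */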
4975 static void
4976 profiler_heap_scan (ProfilerHeapShotHeapBuffers *heap, ProfilerHeapShotWriteJob *job) {
4977 ProfilerHeapShotHeapBuffer *current_buffer = heap->buffers;
4978 MonoObject** current_slot = current_buffer->start_slot;
4980 while (current_slot != heap->first_free_slot) {
4981 MonoObject *obj = *current_slot;
4982 if (mono_object_is_alive (obj)) {
4983 profiler_heap_report_object_reachable (job, obj);
4984 } else {
4985 profiler_heap_report_object_unreachable (job, obj);
4986 *current_slot = profiler_heap_pop_object_from_end (heap, job, current_slot);
4989 if (*current_slot != NULL) {
4990 current_slot ++;
4992 if (current_slot == current_buffer->end_slot) {
4993 current_buffer = current_buffer->next;
4994 g_assert (current_buffer != NULL);
4995 current_slot = current_buffer->start_slot;
5001 static inline gboolean
5002 heap_shot_write_job_should_be_created (gboolean dump_heap_data) {
5003 return dump_heap_data || profiler->action_flags.unreachable_objects || profiler->action_flags.collection_summary;
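/*
 * GC event handling: PRE_STOP_WORLD takes the profiler lock so no other
 * thread can flush events while the collection runs; POST_STOP_WORLD (with
 * the world stopped) decides whether this collection's heap data must be
 * dumped, flushes all class/method mappings and per-thread event buffers,
 * and releases the lock; MARK_END performs the heap scan, processes the
 * per-thread allocation buffers, and hands the resulting write job to the
 * writer thread.  The dump decision is kept in a function-static variable
 * between POST_STOP_WORLD and MARK_END.
 */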
5006 static void
5007 process_gc_event (MonoProfiler *profiler, gboolean do_heap_profiling, MonoGCEvent ev) {
5008 static gboolean dump_heap_data;
5010 switch (ev) {
5011 case MONO_GC_EVENT_PRE_STOP_WORLD:
5012 // Get the lock, so we are sure nobody is flushing events during the collection,
5013 // and we can update all mappings (building the class descriptors).
5014 // This is necessary also during lock profiling (even if do_heap_profiling is FALSE).
5015 LOCK_PROFILER ();
5016 break;
5017 case MONO_GC_EVENT_POST_STOP_WORLD:
5018 if (do_heap_profiling) {
5019 dump_heap_data = dump_current_heap_snapshot ();
5020 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
5021 ProfilerPerThreadData *data;
5022 // Update all mappings, so that we have built all the class descriptors.
5023 flush_all_mappings ();
5024 // Also write all event buffers, so that allocations are recorded.
5025 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
5026 write_thread_data_block (data);
5029 } else {
5030 dump_heap_data = FALSE;
5032 // Release lock...
5033 UNLOCK_PROFILER ();
5034 break;
5035 case MONO_GC_EVENT_MARK_END: {
5036 if (do_heap_profiling) {
5037 ProfilerHeapShotWriteJob *job;
5038 ProfilerPerThreadData *data;
5040 if (heap_shot_write_job_should_be_created (dump_heap_data)) {
5041 job = profiler_heap_shot_write_job_new (profiler->heap_shot_was_requested, dump_heap_data, profiler->garbage_collection_counter);
5042 profiler->heap_shot_was_requested = FALSE;
5043 MONO_PROFILER_GET_CURRENT_COUNTER (job->start_counter);
5044 MONO_PROFILER_GET_CURRENT_TIME (job->start_time);
5045 } else {
5046 job = NULL;
5049 profiler_heap_scan (&(profiler->heap), job);
5051 for (data = profiler->per_thread_data; data != NULL; data = data->next) {
5052 ProfilerHeapShotObjectBuffer *buffer;
5053 for (buffer = data->heap_shot_object_buffers; buffer != NULL; buffer = buffer->next) {
5054 MonoObject **cursor;
5055 for (cursor = buffer->first_unprocessed_slot; cursor < buffer->next_free_slot; cursor ++) {
5056 MonoObject *obj = *cursor;
5057 #if DEBUG_HEAP_PROFILER
5058 printf ("gc_event: in object buffer %p(%p-%p) cursor at %p has object %p ", buffer, &(buffer->buffer [0]), buffer->end, cursor, obj);
5059 #endif
5060 if (mono_object_is_alive (obj)) {
5061 #if DEBUG_HEAP_PROFILER
5062 printf ("(object is alive, adding to heap)\n");
5063 #endif
5064 profiler_heap_add_object (&(profiler->heap), job, obj);
5065 } else {
5066 #if DEBUG_HEAP_PROFILER
5067 printf ("(object is unreachable, reporting in job)\n");
5068 #endif
5069 profiler_heap_report_object_unreachable (job, obj);
5072 buffer->first_unprocessed_slot = cursor;
5076 if (job != NULL) {
5077 MONO_PROFILER_GET_CURRENT_COUNTER (job->end_counter);
5078 MONO_PROFILER_GET_CURRENT_TIME (job->end_time);
5080 profiler_add_heap_shot_write_job (job);
5081 profiler_free_heap_shot_write_jobs ();
5082 WRITER_EVENT_RAISE ();
5085 break;
5087 default:
5088 break;
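/*
 * gc_event records one event per GC notification.  The event value packs the
 * running collection counter in the upper bits and the generation in the low
 * byte ((counter << 8) | generation).  POST_STOP_WORLD is handled before its
 * event record is written, so the record is not part of the per-thread
 * buffers flushed while the world is stopped; every other notification is
 * recorded first and handled afterwards.
 */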
5092 static void
5093 gc_event (MonoProfiler *profiler, MonoGCEvent ev, int generation) {
5094 ProfilerPerThreadData *data;
5095 ProfilerEventData *event;
5096 gboolean do_heap_profiling = profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary;
5097 guint32 event_value;
5099 if (ev == MONO_GC_EVENT_START) {
5100 profiler->garbage_collection_counter ++;
5103 event_value = (profiler->garbage_collection_counter << 8) | generation;
5105 if (ev == MONO_GC_EVENT_POST_STOP_WORLD) {
5106 process_gc_event (profiler, do_heap_profiling, ev);
5109 /* Check if the gc event should be recorded. */
5110 if (profiler->action_flags.report_gc_events || do_heap_profiling) {
5111 GET_PROFILER_THREAD_DATA (data);
5112 GET_NEXT_FREE_EVENT (data, event);
5113 STORE_EVENT_NUMBER_COUNTER (event, profiler, event_value, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, gc_event_code_from_profiler_event (ev), gc_event_kind_from_profiler_event (ev));
5114 COMMIT_RESERVED_EVENTS (data);
5117 if (ev != MONO_GC_EVENT_POST_STOP_WORLD) {
5118 process_gc_event (profiler, do_heap_profiling, ev);
5122 static void
5123 gc_resize (MonoProfiler *profiler, gint64 new_size) {
5124 ProfilerPerThreadData *data;
5125 ProfilerEventData *event;
5126 GET_PROFILER_THREAD_DATA (data);
5127 GET_NEXT_FREE_EVENT (data, event);
5128 profiler->garbage_collection_counter ++;
5129 STORE_EVENT_NUMBER_VALUE (event, profiler, new_size, MONO_PROFILER_EVENT_DATA_TYPE_OTHER, MONO_PROFILER_EVENT_GC_RESIZE, 0, profiler->garbage_collection_counter);
5130 COMMIT_RESERVED_EVENTS (data);
5133 static void
5134 runtime_initialized (MonoProfiler *profiler) {
5135 LOG_WRITER_THREAD ("runtime_initialized: initializing internal calls.\n");
5136 mono_add_internal_call ("Mono.Profiler.RuntimeControls::EnableProfiler", enable_profiler);
5137 mono_add_internal_call ("Mono.Profiler.RuntimeControls::DisableProfiler", disable_profiler);
5138 mono_add_internal_call ("Mono.Profiler.RuntimeControls::TakeHeapSnapshot", request_heap_snapshot);
5139 LOG_WRITER_THREAD ("runtime_initialized: initialized internal calls.\n");
5143 #define MAX_COMMAND_LENGTH (1024)
5144 static int server_socket;
5145 static int command_socket;
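/*
 * Remote control protocol: when a command port is configured the profiler
 * listens on a TCP socket and accepts a single connection.  Commands are
 * plain text terminated by '\n' (an optional '\r' is stripped); the profiler
 * answers each one with "DONE\n" or "ERROR\n".  Recognized commands are
 * "enable", "disable", "heap-snapshot" and "heap-snapshot-counter=<n>|all|none".
 * For example, with the port set to 8888 one could drive it from a shell with
 * something like `printf 'heap-snapshot\n' | nc localhost 8888` (netcat usage
 * shown only as an illustration).
 */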
5147 static void
5148 write_user_response (const char *response) {
5149 LOG_USER_THREAD ("write_user_response: writing response:");
5150 LOG_USER_THREAD (response);
5151 send (command_socket, response, strlen (response), 0);
5154 static void
5155 execute_user_command (char *command) {
5156 char *line_feed;
5158 LOG_USER_THREAD ("execute_user_command: executing command:");
5159 LOG_USER_THREAD (command);
5161 /* Ignore leading and trailing '\r' */
5162 line_feed = strchr (command, '\r');
5163 if (line_feed == command) {
5164 command ++;
5165 line_feed = strchr (command, '\r');
5167 if ((line_feed != NULL) && (* (line_feed + 1) == 0)) {
5168 *line_feed = 0;
5171 if (strcmp (command, "enable") == 0) {
5172 LOG_USER_THREAD ("execute_user_command: enabling profiler");
5173 enable_profiler ();
5174 write_user_response ("DONE\n");
5175 } else if (strcmp (command, "disable") == 0) {
5176 LOG_USER_THREAD ("execute_user_command: disabling profiler");
5177 disable_profiler ();
5178 write_user_response ("DONE\n");
5179 } else if (strcmp (command, "heap-snapshot") == 0) {
5180 LOG_USER_THREAD ("execute_user_command: taking heap snapshot");
5181 profiler->heap_shot_was_requested = TRUE;
5182 WRITER_EVENT_RAISE ();
5183 write_user_response ("DONE\n");
5184 } else if (strstr (command, "heap-snapshot-counter") == command) {
5185 char *equals;
5186 LOG_USER_THREAD ("execute_user_command: changing heap counter");
5187 equals = strstr (command, "=");
5188 if (equals != NULL) {
5189 equals ++;
5190 if (strcmp (equals, "all") == 0) {
5191 LOG_USER_THREAD ("execute_user_command: heap counter is \"all\"");
5192 profiler->dump_next_heap_snapshots = -1;
5193 } else if (strcmp (equals, "none") == 0) {
5194 LOG_USER_THREAD ("execute_user_command: heap counter is \"none\"");
5195 profiler->dump_next_heap_snapshots = 0;
5196 } else {
5197 profiler->dump_next_heap_snapshots = atoi (equals);
5199 write_user_response ("DONE\n");
5200 } else {
5201 write_user_response ("ERROR\n");
5203 profiler->heap_shot_was_requested = TRUE;
5204 } else {
5205 LOG_USER_THREAD ("execute_user_command: command not recognized");
5206 write_user_response ("ERROR\n");
5210 static gboolean
5211 process_user_commands (void) {
5212 char *command_buffer = malloc (MAX_COMMAND_LENGTH);
5213 int command_buffer_current_index = 0;
5214 gboolean loop = TRUE;
5215 gboolean result = TRUE;
5217 while (loop) {
5218 int unprocessed_characters;
5220 LOG_USER_THREAD ("process_user_commands: reading from socket...");
5221 unprocessed_characters = recv (command_socket, command_buffer + command_buffer_current_index, MAX_COMMAND_LENGTH - command_buffer_current_index, 0);
5223 if (unprocessed_characters > 0) {
5224 char *command_end = NULL;
5226 LOG_USER_THREAD ("process_user_commands: received characters.");
5228 do {
5229 if (command_end != NULL) {
5230 *command_end = 0;
5231 execute_user_command (command_buffer);
5232 unprocessed_characters -= (((command_end - command_buffer) - command_buffer_current_index) + 1);
5234 if (unprocessed_characters > 0) {
5235 memmove (command_buffer, command_end + 1, unprocessed_characters);
5237 command_buffer_current_index = 0;
5240 command_end = memchr (command_buffer, '\n', command_buffer_current_index + unprocessed_characters);
5241 } while (command_end != NULL);
5243 command_buffer_current_index += unprocessed_characters;
5245 } else if (unprocessed_characters == 0) {
5246 LOG_USER_THREAD ("process_user_commands: received no character.");
5247 result = TRUE;
5248 loop = FALSE;
5249 } else {
5250 LOG_USER_THREAD ("process_user_commands: received error.");
5251 result = FALSE;
5252 loop = FALSE;
5256 free (command_buffer);
5257 return result;
5260 static guint32
5261 user_thread (gpointer nothing) {
5262 struct sockaddr_in server_address;
5264 server_socket = -1;
5265 command_socket = -1;
5267 LOG_USER_THREAD ("user_thread: starting up...");
5269 server_socket = socket (AF_INET, SOCK_STREAM, 0);
5270 if (server_socket < 0) {
5271 LOG_USER_THREAD ("user_thread: error creating socket.");
5272 return 0;
5274 memset (& server_address, 0, sizeof (server_address));
5276 server_address.sin_family = AF_INET;
5277 server_address.sin_addr.s_addr = INADDR_ANY;
5278 if ((profiler->command_port < 1024) || (profiler->command_port > 65535)) {
5279 LOG_USER_THREAD ("user_thread: invalid port number.");
5280 return 0;
5282 server_address.sin_port = htons (profiler->command_port);
5284 if (bind (server_socket, (struct sockaddr *) &server_address, sizeof(server_address)) < 0) {
5285 LOG_USER_THREAD ("user_thread: error binding socket.");
5286 close (server_socket);
5287 return 0;
5290 LOG_USER_THREAD ("user_thread: listening...\n");
5291 listen (server_socket, 1);
5292 command_socket = accept (server_socket, NULL, NULL);
5293 if (command_socket < 0) {
5294 LOG_USER_THREAD ("user_thread: error accepting socket.");
5295 close (server_socket);
5296 return 0;
5299 LOG_USER_THREAD ("user_thread: processing user commands...");
5300 process_user_commands ();
5302 LOG_USER_THREAD ("user_thread: exiting cleanly.");
5303 close (server_socket);
5304 close (command_socket);
5305 return 0;
5309 /* called at the end of the program */
5310 static void
5311 profiler_shutdown (MonoProfiler *prof)
5313 ProfilerPerThreadData* current_thread_data;
5314 ProfilerPerThreadData* next_thread_data;
5316 LOG_WRITER_THREAD ("profiler_shutdown: zeroing relevant flags");
5317 mono_profiler_set_events (0);
5318 /* During shutdown searching for MonoJitInfo is not possible... */
5319 if (profiler->statistical_call_chain_strategy == MONO_PROFILER_CALL_CHAIN_MANAGED) {
5320 mono_profiler_install_statistical_call_chain (NULL, 0, MONO_PROFILER_CALL_CHAIN_NONE);
5322 //profiler->flags = 0;
5323 //profiler->action_flags.unreachable_objects = FALSE;
5324 //profiler->action_flags.heap_shot = FALSE;
5326 LOG_WRITER_THREAD ("profiler_shutdown: asking stats thread to exit");
5327 profiler->terminate_writer_thread = TRUE;
5328 WRITER_EVENT_RAISE ();
5329 LOG_WRITER_THREAD ("profiler_shutdown: waiting for stats thread to exit");
5330 WAIT_WRITER_THREAD ();
5331 LOG_WRITER_THREAD ("profiler_shutdown: stats thread should be dead now");
5332 WRITER_EVENT_DESTROY ();
5334 LOCK_PROFILER ();
5335 flush_everything ();
5336 MONO_PROFILER_GET_CURRENT_TIME (profiler->end_time);
5337 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->end_counter);
5338 write_end_block ();
5339 FLUSH_FILE ();
5340 CLOSE_FILE();
5341 mono_profiler_install_code_chunk_new (NULL);
5342 mono_profiler_install_code_chunk_destroy (NULL);
5343 mono_profiler_install_code_buffer_new (NULL);
5344 profiler_code_chunks_cleanup (& (profiler->code_chunks));
5345 UNLOCK_PROFILER ();
5347 g_free (profiler->file_name);
5348 if (profiler->file_name_suffix != NULL) {
5349 g_free (profiler->file_name_suffix);
5352 method_id_mapping_destroy (profiler->methods);
5353 class_id_mapping_destroy (profiler->classes);
5354 g_hash_table_destroy (profiler->loaded_assemblies);
5355 g_hash_table_destroy (profiler->loaded_modules);
5356 g_hash_table_destroy (profiler->loaded_appdomains);
5358 FREE_PROFILER_THREAD_DATA ();
5360 for (current_thread_data = profiler->per_thread_data; current_thread_data != NULL; current_thread_data = next_thread_data) {
5361 next_thread_data = current_thread_data->next;
5362 profiler_per_thread_data_destroy (current_thread_data);
5364 if (profiler->statistical_data != NULL) {
5365 profiler_statistical_data_destroy (profiler->statistical_data);
5367 if (profiler->statistical_data_ready != NULL) {
5368 profiler_statistical_data_destroy (profiler->statistical_data_ready);
5370 if (profiler->statistical_data_second_buffer != NULL) {
5371 profiler_statistical_data_destroy (profiler->statistical_data_second_buffer);
5373 if (profiler->executable_regions != NULL) {
5374 profiler_executable_memory_regions_destroy (profiler->executable_regions);
5377 profiler_heap_buffers_free (&(profiler->heap));
5379 profiler_free_write_buffers ();
5380 profiler_destroy_heap_shot_write_jobs ();
5382 DELETE_PROFILER_MUTEX ();
5384 #if (HAS_OPROFILE)
5385 if (profiler->action_flags.oprofile) {
5386 op_close_agent ();
5388 #endif
5390 g_free (profiler);
5391 profiler = NULL;
5394 #define FAIL_ARGUMENT_CHECK(message) do {\
5395 failure_message = (message);\
5396 goto failure_handling;\
5397 } while (0)
5398 #define FAIL_PARSING_VALUED_ARGUMENT FAIL_ARGUMENT_CHECK("cannot parse valued argument %s")
5399 #define FAIL_PARSING_FLAG_ARGUMENT FAIL_ARGUMENT_CHECK("cannot parse flag argument %s")
5400 #define CHECK_CONDITION(condition,message) do {\
5401 gboolean result = (condition);\
5402 if (result) {\
5403 FAIL_ARGUMENT_CHECK (message);\
5405 } while (0)
5406 #define FAIL_IF_HAS_MINUS CHECK_CONDITION(has_minus,"minus ('-') modifier not allowed for argument %s")
5407 #define TRUE_IF_NOT_MINUS ((!has_minus)?TRUE:FALSE)
5409 #define DEFAULT_ARGUMENTS "s"
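/*
 * setup_user_options parses the profiler argument string: a comma-separated
 * list of options, each optionally prefixed with '+' or '-' (a leading '-'
 * normally turns the option off).  Valued options use the form name=value
 * (e.g. output=FILE, tbs=N, cp=PORT); the remaining names are simple flags
 * (heap-shot/h, alloc/a, gc/g, stat/s, calls/c, threads/t, ...).  Assuming
 * the module is installed as the "logging" profiler, a typical invocation
 * would look something like:
 *
 *   mono --profile=logging:heap-shot,gc,output=app.mprof program.exe
 */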
5410 static void
5411 setup_user_options (const char *arguments) {
5412 gchar **arguments_array, **current_argument;
5413 detect_fast_timer ();
5415 profiler->file_name = NULL;
5416 profiler->file_name_suffix = NULL;
5417 profiler->per_thread_buffer_size = 10000;
5418 profiler->statistical_buffer_size = 10000;
5419 profiler->statistical_call_chain_depth = 0;
5420 profiler->statistical_call_chain_strategy = MONO_PROFILER_CALL_CHAIN_NATIVE;
5421 profiler->write_buffer_size = 1024;
5422 profiler->dump_next_heap_snapshots = 0;
5423 profiler->heap_shot_was_requested = FALSE;
5424 profiler->flags = MONO_PROFILE_APPDOMAIN_EVENTS|
5425 MONO_PROFILE_ASSEMBLY_EVENTS|
5426 MONO_PROFILE_MODULE_EVENTS|
5427 MONO_PROFILE_CLASS_EVENTS|
5428 MONO_PROFILE_METHOD_EVENTS|
5429 MONO_PROFILE_JIT_COMPILATION;
5430 profiler->profiler_enabled = TRUE;
5432 if (arguments == NULL) {
5433 arguments = DEFAULT_ARGUMENTS;
5434 } else if (strstr (arguments, ":")) {
5435 arguments = strstr (arguments, ":") + 1;
5436 if (arguments [0] == 0) {
5437 arguments = DEFAULT_ARGUMENTS;
5441 arguments_array = g_strsplit (arguments, ",", -1);
5443 for (current_argument = arguments_array; ((current_argument != NULL) && (current_argument [0] != 0)); current_argument ++) {
5444 char *argument = *current_argument;
5445 char *equals = strstr (argument, "=");
5446 const char *failure_message = NULL;
5447 gboolean has_plus;
5448 gboolean has_minus;
5450 if (*argument == '+') {
5451 has_plus = TRUE;
5452 has_minus = FALSE;
5453 argument ++;
5454 } else if (*argument == '-') {
5455 has_plus = FALSE;
5456 has_minus = TRUE;
5457 argument ++;
5458 } else {
5459 has_plus = FALSE;
5460 has_minus = FALSE;
5463 if (equals != NULL) {
5464 int equals_position = equals - argument;
5466 if (! (strncmp (argument, "per-thread-buffer-size", equals_position) && strncmp (argument, "tbs", equals_position))) {
5467 int value = atoi (equals + 1);
5468 FAIL_IF_HAS_MINUS;
5469 if (value > 0) {
5470 profiler->per_thread_buffer_size = value;
5472 } else if (! (strncmp (argument, "statistical", equals_position) && strncmp (argument, "stat", equals_position) && strncmp (argument, "s", equals_position))) {
5473 int value = atoi (equals + 1);
5474 FAIL_IF_HAS_MINUS;
5475 if (value > 0) {
5476 if (value > MONO_PROFILER_MAX_STAT_CALL_CHAIN_DEPTH) {
5477 value = MONO_PROFILER_MAX_STAT_CALL_CHAIN_DEPTH;
5479 profiler->statistical_call_chain_depth = value;
5480 profiler->flags |= MONO_PROFILE_STATISTICAL;
5482 } else if (! (strncmp (argument, "call-chain-strategy", equals_position) && strncmp (argument, "ccs", equals_position))) {
5483 char *parameter = equals + 1;
5484 FAIL_IF_HAS_MINUS;
5485 if (! strcmp (parameter, "native")) {
5486 profiler->statistical_call_chain_strategy = MONO_PROFILER_CALL_CHAIN_NATIVE;
5487 } else if (! strcmp (parameter, "glibc")) {
5488 profiler->statistical_call_chain_strategy = MONO_PROFILER_CALL_CHAIN_GLIBC;
5489 } else if (! strcmp (parameter, "managed")) {
5490 profiler->statistical_call_chain_strategy = MONO_PROFILER_CALL_CHAIN_MANAGED;
5491 } else {
5492 failure_message = "invalid call chain strategy in argument %s";
5493 goto failure_handling;
5495 } else if (! (strncmp (argument, "statistical-thread-buffer-size", equals_position) && strncmp (argument, "sbs", equals_position))) {
5496 int value = atoi (equals + 1);
5497 FAIL_IF_HAS_MINUS;
5498 if (value > 0) {
5499 profiler->statistical_buffer_size = value;
5501 } else if (! (strncmp (argument, "write-buffer-size", equals_position) && strncmp (argument, "wbs", equals_position))) {
5502 int value = atoi (equals + 1);
5503 FAIL_IF_HAS_MINUS;
5504 if (value > 0) {
5505 profiler->write_buffer_size = value;
5507 } else if (! (strncmp (argument, "output", equals_position) && strncmp (argument, "out", equals_position) && strncmp (argument, "o", equals_position) && strncmp (argument, "O", equals_position))) {
5508 FAIL_IF_HAS_MINUS;
5509 if (strlen (equals + 1) > 0) {
5510 profiler->file_name = g_strdup (equals + 1);
5512 } else if (! (strncmp (argument, "output-suffix", equals_position) && strncmp (argument, "suffix", equals_position) && strncmp (argument, "os", equals_position) && strncmp (argument, "OS", equals_position))) {
5513 FAIL_IF_HAS_MINUS;
5514 if (strlen (equals + 1) > 0) {
5515 profiler->file_name_suffix = g_strdup (equals + 1);
5517 } else if (! (strncmp (argument, "heap-shot", equals_position) && strncmp (argument, "heap", equals_position) && strncmp (argument, "h", equals_position))) {
5518 char *parameter = equals + 1;
5519 if (! strcmp (parameter, "all")) {
5520 profiler->dump_next_heap_snapshots = -1;
5521 } else {
5522 profiler->dump_next_heap_snapshots = atoi (parameter);
5524 FAIL_IF_HAS_MINUS;
5525 if (! has_plus) {
5526 profiler->action_flags.save_allocation_caller = TRUE;
5527 profiler->action_flags.save_allocation_stack = TRUE;
5528 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
5530 profiler->action_flags.heap_shot = TRUE_IF_NOT_MINUS;
5531 } else if (! (strncmp (argument, "gc-dumps", equals_position) && strncmp (argument, "gc-d", equals_position) && strncmp (argument, "gcd", equals_position))) {
5532 FAIL_IF_HAS_MINUS;
5533 if (strlen (equals + 1) > 0) {
5534 profiler->dump_next_heap_snapshots = atoi (equals + 1);
5536 } else if (! (strncmp (argument, "command-port", equals_position) && strncmp (argument, "cp", equals_position))) {
5537 FAIL_IF_HAS_MINUS;
5538 if (strlen (equals + 1) > 0) {
5539 profiler->command_port = atoi (equals + 1);
5541 } else {
5542 FAIL_PARSING_VALUED_ARGUMENT;
5544 } else {
5545 if (! (strcmp (argument, "jit") && strcmp (argument, "j"))) {
5546 profiler->action_flags.jit_time = TRUE_IF_NOT_MINUS;
5547 } else if (! (strcmp (argument, "allocations") && strcmp (argument, "alloc") && strcmp (argument, "a"))) {
5548 FAIL_IF_HAS_MINUS;
5549 if (! has_plus) {
5550 profiler->action_flags.save_allocation_caller = TRUE;
5551 profiler->action_flags.save_allocation_stack = TRUE;
5553 if (! has_minus) {
5554 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
5555 } else {
5556 profiler->flags &= ~MONO_PROFILE_ALLOCATIONS;
5558 } else if (! (strcmp (argument, "monitor") && strcmp (argument, "locks") && strcmp (argument, "lock"))) {
5559 FAIL_IF_HAS_MINUS;
5560 profiler->action_flags.track_stack = TRUE;
5561 profiler->flags |= MONO_PROFILE_MONITOR_EVENTS;
5562 profiler->flags |= MONO_PROFILE_GC;
5563 } else if (! (strcmp (argument, "gc") && strcmp (argument, "g"))) {
5564 FAIL_IF_HAS_MINUS;
5565 profiler->action_flags.report_gc_events = TRUE;
5566 profiler->flags |= MONO_PROFILE_GC;
5567 } else if (! (strcmp (argument, "allocations-summary") && strcmp (argument, "as"))) {
5568 profiler->action_flags.collection_summary = TRUE_IF_NOT_MINUS;
5569 } else if (! (strcmp (argument, "heap-shot") && strcmp (argument, "heap") && strcmp (argument, "h"))) {
5570 FAIL_IF_HAS_MINUS;
5571 if (! has_plus) {
5572 profiler->action_flags.save_allocation_caller = TRUE;
5573 profiler->action_flags.save_allocation_stack = TRUE;
5574 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
5576 profiler->action_flags.heap_shot = TRUE_IF_NOT_MINUS;
5577 } else if (! (strcmp (argument, "unreachable") && strcmp (argument, "free") && strcmp (argument, "f"))) {
5578 profiler->action_flags.unreachable_objects = TRUE_IF_NOT_MINUS;
5579 } else if (! (strcmp (argument, "threads") && strcmp (argument, "t"))) {
5580 if (! has_minus) {
5581 profiler->flags |= MONO_PROFILE_THREADS;
5582 } else {
5583 profiler->flags &= ~MONO_PROFILE_THREADS;
5585 } else if (! (strcmp (argument, "enter-leave") && strcmp (argument, "calls") && strcmp (argument, "c"))) {
5586 profiler->action_flags.track_calls = TRUE_IF_NOT_MINUS;
5587 } else if (! (strcmp (argument, "statistical") && strcmp (argument, "stat") && strcmp (argument, "s"))) {
5588 if (! has_minus) {
5589 profiler->flags |= MONO_PROFILE_STATISTICAL;
5590 } else {
5591 profiler->flags &= ~MONO_PROFILE_STATISTICAL;
5593 } else if (! (strcmp (argument, "save-allocation-caller") && strcmp (argument, "sac"))) {
5594 profiler->action_flags.save_allocation_caller = TRUE_IF_NOT_MINUS;
5595 } else if (! (strcmp (argument, "save-allocation-stack") && strcmp (argument, "sas"))) {
5596 profiler->action_flags.save_allocation_stack = TRUE_IF_NOT_MINUS;
5597 } else if (! (strcmp (argument, "allocations-carry-id") && strcmp (argument, "aci"))) {
5598 profiler->action_flags.allocations_carry_id = TRUE_IF_NOT_MINUS;
5599 } else if (! (strcmp (argument, "start-enabled") && strcmp (argument, "se"))) {
5600 profiler->profiler_enabled = TRUE_IF_NOT_MINUS;
5601 } else if (! (strcmp (argument, "start-disabled") && strcmp (argument, "sd"))) {
5602 profiler->profiler_enabled = ! TRUE_IF_NOT_MINUS;
5603 } else if (! (strcmp (argument, "force-accurate-timer") && strcmp (argument, "fac"))) {
5604 use_fast_timer = TRUE_IF_NOT_MINUS;
5605 #if (HAS_OPROFILE)
5606 } else if (! (strcmp (argument, "oprofile") && strcmp (argument, "oprof"))) {
5607 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
5608 profiler->action_flags.oprofile = TRUE;
5609 if (op_open_agent ()) {
5610 FAIL_ARGUMENT_CHECK ("problem calling op_open_agent");
5612 #endif
5613 } else if (strcmp (argument, "logging")) {
5614 FAIL_PARSING_FLAG_ARGUMENT;
5618 failure_handling:
5619 if (failure_message != NULL) {
5620 g_warning (failure_message, argument);
5621 failure_message = NULL;
5625 g_free (arguments_array);
5627 /* Ensure that the profiler flags needed to support required action flags are active */
5628 if (profiler->action_flags.jit_time) {
5629 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
5631 if (profiler->action_flags.save_allocation_caller || profiler->action_flags.save_allocation_stack || profiler->action_flags.allocations_carry_id) {
5632 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
5634 if (profiler->action_flags.collection_summary || profiler->action_flags.heap_shot || profiler->action_flags.unreachable_objects) {
5635 profiler->flags |= MONO_PROFILE_ALLOCATIONS;
5636 profiler->action_flags.report_gc_events = TRUE;
5638 if (profiler->action_flags.track_calls) {
5639 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
5640 profiler->action_flags.jit_time = TRUE;
5642 if (profiler->action_flags.save_allocation_caller || profiler->action_flags.save_allocation_stack) {
5643 profiler->action_flags.track_stack = TRUE;
5644 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
5646 if (profiler->action_flags.track_stack) {
5647 profiler->flags |= MONO_PROFILE_ENTER_LEAVE;
5650 /* Tracking call stacks is useless if we already emit all enter-exit events... */
5651 if (profiler->action_flags.track_calls) {
5652 profiler->action_flags.track_stack = FALSE;
5653 profiler->action_flags.save_allocation_caller = FALSE;
5654 profiler->action_flags.save_allocation_stack = FALSE;
5657 /* Without JIT events the stat profiler will not find method IDs... */
5658 if (profiler->flags & MONO_PROFILE_STATISTICAL) {
5659 profiler->flags |= MONO_PROFILE_JIT_COMPILATION;
5661 /* Profiling allocations without knowing which gc we are doing is not nice... */
5662 if (profiler->flags & MONO_PROFILE_ALLOCATIONS) {
5663 profiler->flags |= MONO_PROFILE_GC;
5664 profiler->action_flags.report_gc_events = TRUE;
5668 if (profiler->file_name == NULL) {
5669 const char *program_name = g_get_prgname ();
5671 if (program_name != NULL) {
5672 char *name_buffer = g_strdup (program_name);
5673 char *name_start = name_buffer;
5674 char *cursor;
5676 /* Jump over the last '/' */
5677 cursor = strrchr (name_buffer, '/');
5678 if (cursor == NULL) {
5679 cursor = name_buffer;
5680 } else {
5681 cursor ++;
5683 name_start = cursor;
5685 /* Then jump over the last '\\' */
5686 cursor = strrchr (name_start, '\\');
5687 if (cursor == NULL) {
5688 cursor = name_start;
5689 } else {
5690 cursor ++;
5692 name_start = cursor;
5694 /* Finally, find the last '.' */
5695 cursor = strrchr (name_start, '.');
5696 if (cursor != NULL) {
5697 *cursor = 0;
5700 if (profiler->file_name_suffix == NULL) {
5701 profiler->file_name = g_strdup_printf ("%s.mprof", name_start);
5702 } else {
5703 profiler->file_name = g_strdup_printf ("%s-%s.mprof", name_start, profiler->file_name_suffix);
5705 g_free (name_buffer);
5706 } else {
5707 profiler->file_name = g_strdup_printf ("%s.mprof", "profiler-log");
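/*
 * data_writer_thread is the background thread that owns the output file.  It
 * sleeps on the writer event; when woken it first honours a pending heap
 * snapshot request by attaching to the root domain and forcing a full
 * collection, then it either writes the ready statistical buffer and any
 * queued heap shot jobs (taking the profiler lock) or, if there is nothing
 * to write, simply flushes the buffers.  It exits when profiler_shutdown
 * sets terminate_writer_thread and raises the event one last time.
 */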
5712 static guint32
5713 data_writer_thread (gpointer nothing) {
5714 for (;;) {
5715 ProfilerStatisticalData *statistical_data;
5716 gboolean done;
5718 LOG_WRITER_THREAD ("data_writer_thread: going to sleep");
5719 WRITER_EVENT_WAIT ();
5720 LOG_WRITER_THREAD ("data_writer_thread: just woke up");
5722 if (profiler->heap_shot_was_requested) {
5723 MonoDomain * root_domain = mono_get_root_domain ();
5725 if (root_domain != NULL) {
5726 MonoThread *this_thread;
5727 LOG_WRITER_THREAD ("data_writer_thread: attaching thread");
5728 this_thread = mono_thread_attach (root_domain);
5729 LOG_WRITER_THREAD ("data_writer_thread: starting requested collection");
5730 mono_gc_collect (mono_gc_max_generation ());
5731 LOG_WRITER_THREAD ("data_writer_thread: requested collection done");
5732 LOG_WRITER_THREAD ("data_writer_thread: detaching thread");
5733 mono_thread_detach (this_thread);
5734 this_thread = NULL;
5735 LOG_WRITER_THREAD ("data_writer_thread: collection sequence completed");
5736 } else {
5737 LOG_WRITER_THREAD ("data_writer_thread: cannot get root domain, collection sequence skipped");
5742 statistical_data = profiler->statistical_data_ready;
5743 done = (statistical_data == NULL) && (profiler->heap_shot_write_jobs == NULL);
5745 if (!done) {
5746 LOG_WRITER_THREAD ("data_writer_thread: acquiring lock and writing data");
5747 LOCK_PROFILER ();
5749 // This makes sure that all method ids are in place
5750 LOG_WRITER_THREAD ("data_writer_thread: writing mapping...");
5751 flush_all_mappings ();
5752 LOG_WRITER_THREAD ("data_writer_thread: wrote mapping");
5754 if (statistical_data != NULL) {
5755 LOG_WRITER_THREAD ("data_writer_thread: writing statistical data...");
5756 profiler->statistical_data_ready = NULL;
5757 write_statistical_data_block (statistical_data);
5758 statistical_data->next_free_index = 0;
5759 statistical_data->first_unwritten_index = 0;
5760 profiler->statistical_data_second_buffer = statistical_data;
5761 LOG_WRITER_THREAD ("data_writer_thread: wrote statistical data");
5764 profiler_process_heap_shot_write_jobs ();
5766 UNLOCK_PROFILER ();
5767 LOG_WRITER_THREAD ("data_writer_thread: wrote data and released lock");
5768 } else {
5769 LOG_WRITER_THREAD ("data_writer_thread: acquiring lock and flushing buffers");
5770 LOCK_PROFILER ();
5771 LOG_WRITER_THREAD ("data_writer_thread: lock acquired, flushing buffers");
5772 flush_everything ();
5773 UNLOCK_PROFILER ();
5774 LOG_WRITER_THREAD ("data_writer_thread: flushed buffers and released lock");
5777 if (profiler->terminate_writer_thread) {
5778 LOG_WRITER_THREAD ("data_writer_thread: exiting thread");
5779 CLEANUP_WRITER_THREAD ();
5780 EXIT_THREAD ();
5783 return 0;
5786 void
5787 mono_profiler_startup (const char *desc);
5789 /* the entry point (mono_profiler_load?) */
5790 void
5791 mono_profiler_startup (const char *desc)
5793 profiler = g_new0 (MonoProfiler, 1);
5795 setup_user_options ((desc != NULL) ? desc : DEFAULT_ARGUMENTS);
5797 INITIALIZE_PROFILER_MUTEX ();
5798 MONO_PROFILER_GET_CURRENT_TIME (profiler->start_time);
5799 MONO_PROFILER_GET_CURRENT_COUNTER (profiler->start_counter);
5800 profiler->last_header_counter = 0;
5802 profiler->methods = method_id_mapping_new ();
5803 profiler->classes = class_id_mapping_new ();
5804 profiler->loaded_element_next_free_id = 1;
5805 profiler->loaded_assemblies = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5806 profiler->loaded_modules = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5807 profiler->loaded_appdomains = g_hash_table_new_full (g_direct_hash, NULL, NULL, loaded_element_destroy);
5809 profiler->statistical_data = profiler_statistical_data_new (profiler);
5810 profiler->statistical_data_second_buffer = profiler_statistical_data_new (profiler);
5812 profiler->write_buffers = g_malloc (sizeof (ProfilerFileWriteBuffer) + PROFILER_FILE_WRITE_BUFFER_SIZE);
5813 profiler->write_buffers->next = NULL;
5814 profiler->current_write_buffer = profiler->write_buffers;
5815 profiler->current_write_position = 0;
5816 profiler->full_write_buffers = 0;
5817 profiler_code_chunks_initialize (& (profiler->code_chunks));
5819 profiler->executable_regions = profiler_executable_memory_regions_new (1, 1);
5821 profiler->executable_files.table = g_hash_table_new (g_str_hash, g_str_equal);
5822 profiler->executable_files.new_files = NULL;
5824 profiler->heap_shot_write_jobs = NULL;
5825 if (profiler->action_flags.unreachable_objects || profiler->action_flags.heap_shot || profiler->action_flags.collection_summary) {
5826 profiler_heap_buffers_setup (&(profiler->heap));
5827 } else {
5828 profiler_heap_buffers_clear (&(profiler->heap));
5830 profiler->garbage_collection_counter = 0;
5832 WRITER_EVENT_INIT ();
5833 LOG_WRITER_THREAD ("mono_profiler_startup: creating writer thread");
5834 CREATE_WRITER_THREAD (data_writer_thread);
5835 LOG_WRITER_THREAD ("mono_profiler_startup: created writer thread");
5836 if ((profiler->command_port >= 1024) && (profiler->command_port <= 65535)) {
5837 LOG_USER_THREAD ("mono_profiler_startup: creating user thread");
5838 CREATE_USER_THREAD (user_thread);
5839 LOG_USER_THREAD ("mono_profiler_startup: created user thread");
5840 } else {
5841 LOG_USER_THREAD ("mono_profiler_startup: skipping user thread creation");
5844 ALLOCATE_PROFILER_THREAD_DATA ();
5846 OPEN_FILE ();
5848 write_intro_block ();
5849 write_directives_block (TRUE);
5851 mono_profiler_install (profiler, profiler_shutdown);
5853 mono_profiler_install_appdomain (appdomain_start_load, appdomain_end_load,
5854 appdomain_start_unload, appdomain_end_unload);
5855 mono_profiler_install_assembly (assembly_start_load, assembly_end_load,
5856 assembly_start_unload, assembly_end_unload);
5857 mono_profiler_install_module (module_start_load, module_end_load,
5858 module_start_unload, module_end_unload);
5859 mono_profiler_install_class (class_start_load, class_end_load,
5860 class_start_unload, class_end_unload);
5861 mono_profiler_install_jit_compile (method_start_jit, method_end_jit);
5862 mono_profiler_install_enter_leave (method_enter, method_leave);
5863 mono_profiler_install_method_free (method_free);
5864 mono_profiler_install_thread (thread_start, thread_end);
5865 mono_profiler_install_allocation (object_allocated);
5866 mono_profiler_install_monitor (monitor_event);
5867 mono_profiler_install_statistical (statistical_hit);
5868 mono_profiler_install_statistical_call_chain (statistical_call_chain, profiler->statistical_call_chain_depth, profiler->statistical_call_chain_strategy);
5869 mono_profiler_install_gc (gc_event, gc_resize);
5870 mono_profiler_install_runtime_initialized (runtime_initialized);
5871 #if (HAS_OPROFILE)
5872 mono_profiler_install_jit_end (method_jit_result);
5873 #endif
5874 if (profiler->flags & MONO_PROFILE_STATISTICAL) {
5875 mono_profiler_install_code_chunk_new (profiler_code_chunk_new_callback);
5876 mono_profiler_install_code_chunk_destroy (profiler_code_chunk_destroy_callback);
5877 mono_profiler_install_code_buffer_new (profiler_code_buffer_new_callback);
5880 mono_profiler_set_events (profiler->flags);