/* Profiler implementation.

Copyright (C) 2012 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

#include <config.h>
#include "lisp.h"
#include "syssignal.h"
#include "systime.h"

/* Return A + B, but return the maximum fixnum if the result would overflow.
   Assume A and B are nonnegative and in fixnum range.  */

static EMACS_INT
saturated_add (EMACS_INT a, EMACS_INT b)
{
  return min (a + b, MOST_POSITIVE_FIXNUM);
}

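/* Illustrative example (added comment, not in the original source):
   saturated_add (MOST_POSITIVE_FIXNUM, 1) returns MOST_POSITIVE_FIXNUM
   instead of wrapping around, so long-running profiles clamp their
   counters at the largest fixnum rather than overflowing.  */
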
/* Logs.  */

typedef struct Lisp_Hash_Table log_t;

static Lisp_Object
make_log (int heap_size, int max_stack_depth)
{
  /* We use a standard Elisp hash-table object, but we use it in
     a special way.  This is OK as long as the object is not exposed
     to Elisp, i.e. until it is returned by *-profiler-log, after which
     it can't be used any more.  */
  Lisp_Object log = make_hash_table (Qequal, make_number (heap_size),
                                     make_float (DEFAULT_REHASH_SIZE),
                                     make_float (DEFAULT_REHASH_THRESHOLD),
                                     Qnil, Qnil, Qnil);
  struct Lisp_Hash_Table *h = XHASH_TABLE (log);

  /* What is special about our hash-tables is that the keys are pre-filled
     with the vectors we'll put in them.  */
  int i = ASIZE (h->key_and_value) / 2;
  while (0 < i)
    set_hash_key_slot (h, --i,
                       Fmake_vector (make_number (max_stack_depth), Qnil));
  return log;
}

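/* Note (added for exposition): the keys are pre-allocated because the
   table is filled from a signal handler (see record_backtrace below),
   where calling the allocator would be unsafe; every slot therefore
   already owns a reusable vector of max_stack_depth elements.  */
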
/* Evict the least used half of the hash table.

   When the table is full, we have to evict someone.
   The easiest and most efficient approach is to evict the value we're
   about to add (i.e. once the table is full, stop sampling).

   We could also pick the element with the lowest count and evict it,
   but finding it is O(N) and for that amount of work we get very
   little in return: for the next sample, this latest sample will have
   count==1 and will hence be a prime candidate for eviction :-(

   So instead, we take O(N) time to eliminate more or less half of the
   entries (the half with the lowest counts).  So we get an amortized
   cost of O(1) and we get O(N) time for a new entry to grow larger
   than the other least counts before a new round of eviction.  */

static EMACS_INT approximate_median (log_t *log,
                                     ptrdiff_t start, ptrdiff_t size)
{
  eassert (size > 0);
  if (size < 2)
    return XINT (HASH_VALUE (log, start));
  if (size < 3)
    /* Not an actual median, but better for our application than
       choosing either of the two numbers.  */
    return ((XINT (HASH_VALUE (log, start))
             + XINT (HASH_VALUE (log, start + 1)))
            / 2);
  else
    {
      ptrdiff_t newsize = size / 3;
      ptrdiff_t start2 = start + newsize;
      EMACS_INT i1 = approximate_median (log, start, newsize);
      EMACS_INT i2 = approximate_median (log, start2, newsize);
      EMACS_INT i3 = approximate_median (log, start2 + newsize,
                                         size - 2 * newsize);
      return (i1 < i2
              ? (i2 < i3 ? i2 : (i1 < i3 ? i3 : i1))
              : (i1 < i3 ? i1 : (i2 < i3 ? i3 : i2)));
    }
}

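/* Worked example (added for exposition, not in the original source):
   with nine counts {7, 1, 5, 3, 9, 2, 8, 4, 6}, the range splits into
   thirds {7, 1, 5}, {3, 9, 2} and {8, 4, 6}, whose approximate
   medians are 5, 3 and 6; the median of those, 5, is returned (here
   it happens to equal the exact median).  Each call does constant
   work and the total number of calls is O(N), so the estimate costs
   O(N) time overall.  */
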
static void evict_lower_half (log_t *log)
{
  ptrdiff_t size = ASIZE (log->key_and_value) / 2;
  EMACS_INT median = approximate_median (log, 0, size);
  ptrdiff_t i;

  for (i = 0; i < size; i++)
    /* Evict not only values smaller but also values equal to the median,
       so as to make sure we evict something no matter what.  */
    if (XINT (HASH_VALUE (log, i)) <= median)
      {
        Lisp_Object key = HASH_KEY (log, i);
        { /* FIXME: we could make this more efficient.  */
          Lisp_Object tmp;
          XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr.  */
          Fremhash (key, tmp);
        }
        eassert (EQ (log->next_free, make_number (i)));

        {
          int j;
          eassert (VECTORP (key));
          for (j = 0; j < ASIZE (key); j++)
            ASET (key, j, Qnil);
        }

        set_hash_key_slot (log, i, key);
      }
}

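/* Note (added for exposition): inside the loop above, Fremhash frees
   slot I and puts it at the head of log->next_free (which is what the
   eassert checks), and the nil-filled key vector is re-installed in
   the same slot so its preallocated storage can later be reused by
   record_backtrace without allocating.  */
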
/* Record the current backtrace in LOG.  COUNT is the weight of this
   current backtrace: interrupt counts for CPU, and the allocation
   size for memory.  */

static void
record_backtrace (log_t *log, EMACS_INT count)
{
  struct backtrace *backlist = backtrace_list;
  Lisp_Object backtrace;
  ptrdiff_t index, i = 0;
  ptrdiff_t asize;

  if (!INTEGERP (log->next_free))
    /* FIXME: transfer the evicted counts to a special entry rather
       than dropping them on the floor.  */
    evict_lower_half (log);
  index = XINT (log->next_free);

  /* Get a "working memory" vector.  */
  backtrace = HASH_KEY (log, index);
  asize = ASIZE (backtrace);

  /* Copy the backtrace contents into working memory.  */
  for (; i < asize && backlist; i++, backlist = backlist->next)
    /* FIXME: For closures we should ignore the environment.  */
    ASET (backtrace, i, backlist->function);

  /* Make sure that unused space of working memory is filled with nil.  */
  for (; i < asize; i++)
    ASET (backtrace, i, Qnil);

  { /* We basically do a `gethash+puthash' here, except that we have to be
       careful to avoid memory allocation since we're in a signal
       handler, and we optimize the code to try and avoid computing the
       hash+lookup twice.  See fns.c:Fputhash for reference.  */
    EMACS_UINT hash;
    ptrdiff_t j = hash_lookup (log, backtrace, &hash);
    if (j >= 0)
      {
        EMACS_INT old_val = XINT (HASH_VALUE (log, j));
        EMACS_INT new_val = saturated_add (old_val, count);
        set_hash_value_slot (log, j, make_number (new_val));
      }
    else
      { /* BEWARE!  hash_put in general can allocate memory.
           But currently it only does that if log->next_free is nil.  */
        int j;
        eassert (!NILP (log->next_free));
        j = hash_put (log, backtrace, make_number (count), hash);
        /* Let's make sure we've put `backtrace' right where it
           already was to start with.  */
        eassert (index == j);

        /* FIXME: If the hash-table is almost full, we should set
           some global flag so that some Elisp code can offload its
           data elsewhere, so as to avoid the eviction code.
           There are 2 ways to do that, AFAICT:
           - Set a flag checked in QUIT, such that QUIT can then call
             Fprofiler_cpu_log and stash the full log for later use.
           - Set a flag checked in post-gc-hook, so that Elisp code can call
             profiler-cpu-log.  That gives us more flexibility since that
             Elisp code can then do all kinds of fun stuff like write
             the log to disk.  Or turn it right away into a call tree.
           Of course, using Elisp is generally preferable, but it may
           take longer until we get a chance to run the Elisp code, so
           there's more risk that the table will get full before we
           get there.  */
      }
  }
}

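/* Illustrative sketch (added comment; the function names below are
   hypothetical): after a few samples, a log entry conceptually looks
   like

     [my-inner-loop my-command command-execute nil ... nil]  ->  137

   i.e. the key is a vector of at most profiler-max-stack-depth
   functions, innermost frame first, padded with nil, and the value is
   the accumulated COUNT for that backtrace.  */
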
/* Sampling profiler.  */

#ifdef PROFILER_CPU_SUPPORT

/* The profiler timer and whether it was properly initialized, if
   POSIX timers are available.  */
#ifdef HAVE_ITIMERSPEC
static timer_t profiler_timer;
static bool profiler_timer_ok;
#endif

/* Status of sampling profiler.  */
static enum profiler_cpu_running
  { NOT_RUNNING, TIMER_SETTIME_RUNNING, SETITIMER_RUNNING }
  profiler_cpu_running;

/* Hash-table log of CPU profiler.  */
static Lisp_Object cpu_log;

/* Separate counter for the time spent in the GC.  */
static EMACS_INT cpu_gc_count;

/* The current sampling interval in nanoseconds.  */
static EMACS_INT current_sampling_interval;

/* Signal handler for sampling profiler.  */

static void
handle_profiler_signal (int signal)
{
  if (backtrace_list && EQ (backtrace_list->function, Qautomatic_gc))
    /* Special case the time-count inside GC because the hash-table
       code is not prepared to be used while the GC is running.
       More specifically it uses ASIZE at many places where it does
       not expect the ARRAY_MARK_FLAG to be set.  We could try and
       harden the hash-table code, but it doesn't seem worth the
       effort.  */
    cpu_gc_count = saturated_add (cpu_gc_count, 1);
  else
    {
      Lisp_Object oquit;
      bool saved_pending_signals;
      EMACS_INT count = 1;
#ifdef HAVE_ITIMERSPEC
      if (profiler_timer_ok)
        {
          int overruns = timer_getoverrun (profiler_timer);
          eassert (0 <= overruns);
          count += overruns;
        }
#endif
      /* record_backtrace uses hash functions that call Fequal, which
         uses QUIT, which can call malloc, which can cause disaster in
         a signal handler.  So inhibit QUIT.  */
      oquit = Vinhibit_quit;
      saved_pending_signals = pending_signals;
      Vinhibit_quit = Qt;
      pending_signals = 0;

      eassert (HASH_TABLE_P (cpu_log));
      record_backtrace (XHASH_TABLE (cpu_log), count);

      Vinhibit_quit = oquit;
      pending_signals = saved_pending_signals;
    }
}

static void
deliver_profiler_signal (int signal)
{
  deliver_process_signal (signal, handle_profiler_signal);
}

static enum profiler_cpu_running
setup_cpu_timer (Lisp_Object sampling_interval)
{
  struct sigaction action;
  struct itimerval timer;
  struct timespec interval;
  int billion = 1000000000;

  if (! RANGED_INTEGERP (1, sampling_interval,
                         (TYPE_MAXIMUM (time_t) < EMACS_INT_MAX / billion
                          ? ((EMACS_INT) TYPE_MAXIMUM (time_t) * billion
                             + (billion - 1))
                          : EMACS_INT_MAX)))
    return NOT_RUNNING;

  current_sampling_interval = XINT (sampling_interval);
  interval = make_emacs_time (current_sampling_interval / billion,
                              current_sampling_interval % billion);

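  /* Illustrative example (added comment, not in the original source):
     a SAMPLING-INTERVAL of 1000000 becomes 0 seconds + 1000000
     nanoseconds, i.e. about one sample per millisecond.  The range
     check above rejects intervals whose seconds part would not fit in
     time_t.  */
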
  emacs_sigaction_init (&action, deliver_profiler_signal);
  sigaction (SIGPROF, &action, 0);

#ifdef HAVE_ITIMERSPEC
  if (! profiler_timer_ok)
    {
      /* System clocks to try, in decreasing order of desirability.  */
      static clockid_t const system_clock[] = {
#ifdef CLOCK_THREAD_CPUTIME_ID
        CLOCK_THREAD_CPUTIME_ID,
#endif
#ifdef CLOCK_PROCESS_CPUTIME_ID
        CLOCK_PROCESS_CPUTIME_ID,
#endif
#ifdef CLOCK_MONOTONIC
        CLOCK_MONOTONIC,
#endif
        CLOCK_REALTIME
      };
      int i;
      struct sigevent sigev;
      sigev.sigev_value.sival_ptr = &profiler_timer;
      sigev.sigev_signo = SIGPROF;
      sigev.sigev_notify = SIGEV_SIGNAL;

      for (i = 0; i < sizeof system_clock / sizeof *system_clock; i++)
        if (timer_create (system_clock[i], &sigev, &profiler_timer) == 0)
          {
            profiler_timer_ok = 1;
            break;
          }
    }

  if (profiler_timer_ok)
    {
      struct itimerspec ispec;
      ispec.it_value = ispec.it_interval = interval;
      if (timer_settime (profiler_timer, 0, &ispec, 0) == 0)
        return TIMER_SETTIME_RUNNING;
    }
#endif

#ifdef HAVE_SETITIMER
  timer.it_value = timer.it_interval = make_timeval (interval);
  if (setitimer (ITIMER_PROF, &timer, 0) == 0)
    return SETITIMER_RUNNING;
#endif

  return NOT_RUNNING;
}

DEFUN ("profiler-cpu-start", Fprofiler_cpu_start, Sprofiler_cpu_start,
       1, 1, 0,
       doc: /* Start or restart the cpu profiler.
It takes call-stack samples each SAMPLING-INTERVAL nanoseconds, approximately.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (Lisp_Object sampling_interval)
{
  if (profiler_cpu_running)
    error ("CPU profiler is already running");

  if (NILP (cpu_log))
    {
      cpu_gc_count = 0;
      cpu_log = make_log (profiler_log_size,
                          profiler_max_stack_depth);
    }

  profiler_cpu_running = setup_cpu_timer (sampling_interval);
  if (! profiler_cpu_running)
    error ("Invalid sampling interval");

  return Qt;
}

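/* Usage sketch (added comment): from Elisp one would typically run

     (profiler-cpu-start 1000000)   ; sample roughly every millisecond

   or use the higher-level `M-x profiler-start' from profiler.el,
   which invokes this function with its configured interval.  */
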
DEFUN ("profiler-cpu-stop", Fprofiler_cpu_stop, Sprofiler_cpu_stop,
       0, 0, 0,
       doc: /* Stop the cpu profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  switch (profiler_cpu_running)
    {
    case NOT_RUNNING:
      return Qnil;

#ifdef HAVE_ITIMERSPEC
    case TIMER_SETTIME_RUNNING:
      {
        struct itimerspec disable;
        memset (&disable, 0, sizeof disable);
        timer_settime (profiler_timer, 0, &disable, 0);
      }
      break;
#endif

#ifdef HAVE_SETITIMER
    case SETITIMER_RUNNING:
      {
        struct itimerval disable;
        memset (&disable, 0, sizeof disable);
        setitimer (ITIMER_PROF, &disable, 0);
      }
      break;
#endif
    }

  signal (SIGPROF, SIG_IGN);
  profiler_cpu_running = NOT_RUNNING;
  return Qt;
}

DEFUN ("profiler-cpu-running-p",
       Fprofiler_cpu_running_p, Sprofiler_cpu_running_p,
       0, 0, 0,
       doc: /* Return non-nil iff cpu profiler is running.  */)
  (void)
{
  return profiler_cpu_running ? Qt : Qnil;
}

DEFUN ("profiler-cpu-log", Fprofiler_cpu_log, Sprofiler_cpu_log,
       0, 0, 0,
       doc: /* Return the current cpu profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of time spent at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
  (void)
{
  Lisp_Object result = cpu_log;
  /* Here we're making the log visible to Elisp, so it's not safe any
     more for our use afterwards since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
  cpu_log = (profiler_cpu_running
             ? make_log (profiler_log_size, profiler_max_stack_depth)
             : Qnil);
  Fputhash (Fmake_vector (make_number (1), Qautomatic_gc),
            make_number (cpu_gc_count),
            result);
  cpu_gc_count = 0;
  return result;
}

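/* Note (added for exposition): the Fputhash above folds the separate
   cpu_gc_count counter into the returned table under the one-element
   backtrace [automatic-gc], so time spent in GC appears in the log
   like any other entry.  */
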
#endif /* PROFILER_CPU_SUPPORT */

/* Memory profiler.  */

/* True if memory profiler is running.  */
bool profiler_memory_running;

static Lisp_Object memory_log;

DEFUN ("profiler-memory-start", Fprofiler_memory_start, Sprofiler_memory_start,
       0, 0, 0,
       doc: /* Start/restart the memory profiler.
The memory profiler will take samples of the call-stack whenever a new
allocation takes place.  Note that most small allocations only trigger
the profiler occasionally.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (void)
{
  if (profiler_memory_running)
    error ("Memory profiler is already running");

  if (NILP (memory_log))
    memory_log = make_log (profiler_log_size,
                           profiler_max_stack_depth);

  profiler_memory_running = true;

  return Qt;
}

DEFUN ("profiler-memory-stop",
       Fprofiler_memory_stop, Sprofiler_memory_stop,
       0, 0, 0,
       doc: /* Stop the memory profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  if (!profiler_memory_running)
    return Qnil;
  profiler_memory_running = false;
  return Qt;
}

DEFUN ("profiler-memory-running-p",
       Fprofiler_memory_running_p, Sprofiler_memory_running_p,
       0, 0, 0,
       doc: /* Return non-nil if memory profiler is running.  */)
  (void)
{
  return profiler_memory_running ? Qt : Qnil;
}

DEFUN ("profiler-memory-log",
       Fprofiler_memory_log, Sprofiler_memory_log,
       0, 0, 0,
       doc: /* Return the current memory profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of memory allocated at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
  (void)
{
  Lisp_Object result = memory_log;
  /* Here we're making the log visible to Elisp, so it's not safe any
     more for our use afterwards since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
  memory_log = (profiler_memory_running
                ? make_log (profiler_log_size, profiler_max_stack_depth)
                : Qnil);
  return result;
}

/* Signals and probes.  */

/* Record that the current backtrace allocated SIZE bytes.  */
void
malloc_probe (size_t size)
{
  eassert (HASH_TABLE_P (memory_log));
  record_backtrace (XHASH_TABLE (memory_log), min (size, MOST_POSITIVE_FIXNUM));
}

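/* Note (added for exposition): SIZE is clamped to MOST_POSITIVE_FIXNUM
   for the same reason record_backtrace uses saturated_add: the
   per-backtrace byte totals are stored as Elisp fixnums and must not
   overflow.  */
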
void
syms_of_profiler (void)
{
  DEFVAR_INT ("profiler-max-stack-depth", profiler_max_stack_depth,
              doc: /* Number of elements from the call-stack recorded in the log.  */);
  profiler_max_stack_depth = 16;
  DEFVAR_INT ("profiler-log-size", profiler_log_size,
              doc: /* Number of distinct call-stacks that can be recorded in a profiler log.
If the log gets full, some of the least-seen call-stacks will be evicted
to make room for new entries.  */);
  profiler_log_size = 10000;

#ifdef PROFILER_CPU_SUPPORT
  profiler_cpu_running = NOT_RUNNING;
  cpu_log = Qnil;
  staticpro (&cpu_log);
  defsubr (&Sprofiler_cpu_start);
  defsubr (&Sprofiler_cpu_stop);
  defsubr (&Sprofiler_cpu_running_p);
  defsubr (&Sprofiler_cpu_log);
#endif
  profiler_memory_running = false;
  memory_log = Qnil;
  staticpro (&memory_log);
  defsubr (&Sprofiler_memory_start);
  defsubr (&Sprofiler_memory_stop);
  defsubr (&Sprofiler_memory_running_p);
  defsubr (&Sprofiler_memory_log);
}