1 /* Profile heap and stack memory usage of running program.
2 Copyright (C) 1998-2023 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
22 #include <stdatomic.h>
31 #include <hp-timing.h>
32 #include <machine-sp.h>
33 #include <stackinfo.h> /* For _STACK_GROWS_UP */
35 /* Pointer to the real functions. These are determined used `dlsym'
36 when really needed. */
37 static void *(*mallocp
)(size_t);
38 static void *(*reallocp
) (void *, size_t);
39 static void *(*callocp
) (size_t, size_t);
40 static void (*freep
) (void *);
42 static void *(*mmapp
) (void *, size_t, int, int, int, off_t
);
43 static void *(*mmap64p
) (void *, size_t, int, int, int, off64_t
);
44 static int (*munmapp
) (void *, size_t);
45 static void *(*mremapp
) (void *, size_t, size_t, int, void *);
68 #define MAGIC 0xfeedbeaf
71 static _Atomic
unsigned long int calls
[idx_last
];
72 static _Atomic
unsigned long int failed
[idx_last
];
73 static _Atomic
size_t total
[idx_last
];
74 static _Atomic
size_t grand_total
;
75 static _Atomic
unsigned long int histogram
[65536 / 16];
76 static _Atomic
unsigned long int large
;
77 static _Atomic
unsigned long int calls_total
;
78 static _Atomic
unsigned long int inplace
;
79 static _Atomic
unsigned long int decreasing
;
80 static _Atomic
unsigned long int realloc_free
;
81 static _Atomic
unsigned long int inplace_mremap
;
82 static _Atomic
unsigned long int decreasing_mremap
;
83 static _Atomic
size_t current_heap
;
84 static _Atomic
size_t peak_use
[3];
85 static __thread
uintptr_t start_sp
;
87 /* A few macros to make the source more readable. */
88 #define peak_heap peak_use[0]
89 #define peak_stack peak_use[1]
90 #define peak_total peak_use[2]
92 #define DEFAULT_BUFFER_SIZE 32768
93 static size_t buffer_size
;
98 static int initialized
;
99 static bool trace_mmap
;
100 extern const char *__progname
;
110 static struct entry buffer
[2 * DEFAULT_BUFFER_SIZE
];
111 static _Atomic
uint32_t buffer_cnt
;
112 static struct entry first
;
115 gettime (struct entry
*e
)
120 e
->time_low
= now
& 0xffffffff;
121 e
->time_high
= now
>> 32;
123 struct __timespec64 now
;
125 __clock_gettime64 (CLOCK_REALTIME
, &now
);
126 usecs
= (uint64_t)now
.tv_nsec
/ 1000 + (uint64_t)now
.tv_sec
* 1000000;
127 e
->time_low
= usecs
& 0xffffffff;
128 e
->time_high
= usecs
>> 32;
/* Atomically raise *PEAK to VAL if VAL is larger.  A relaxed
   compare-and-swap loop guarantees that concurrent updaters never
   replace a larger recorded peak with a smaller sample.  */
static void
peak_atomic_max (_Atomic size_t *peak, size_t val)
{
  size_t v = atomic_load_explicit (peak, memory_order_relaxed);
  do
    {
      /* Another thread already recorded an equal or larger peak:
	 nothing to do.  Without this guard the CAS below would
	 store VAL even when it is smaller than the current peak.  */
      if (v >= val)
	break;
    }
  /* On failure atomic_compare_exchange_weak reloads the current
     value into V, so the comparison above sees fresh data.  */
  while (! atomic_compare_exchange_weak (peak, &v, val));
}
145 /* Update the global data after a successful function call. */
147 update_data (struct header
*result
, size_t len
, size_t old_len
)
151 /* Record the information we need and mark the block using a
153 result
->length
= len
;
154 result
->magic
= MAGIC
;
157 /* Compute current heap usage and compare it with the maximum value. */
159 = atomic_fetch_add_explicit (¤t_heap
, len
- old_len
,
160 memory_order_relaxed
) + len
- old_len
;
161 peak_atomic_max (&peak_heap
, heap
);
163 /* Compute current stack usage and compare it with the maximum
164 value. The base stack pointer might not be set if this is not
165 the main thread and it is the first call to any of these
167 if (__glibc_unlikely (!start_sp
))
168 start_sp
= __thread_stack_pointer ();
170 uintptr_t sp
= __thread_stack_pointer ();
171 #ifdef _STACK_GROWS_UP
172 /* This can happen in threads where we didn't catch the thread's
173 stack early enough. */
174 if (__glibc_unlikely (sp
< start_sp
))
176 size_t current_stack
= sp
- start_sp
;
178 /* This can happen in threads where we didn't catch the thread's
179 stack early enough. */
180 if (__glibc_unlikely (sp
> start_sp
))
182 size_t current_stack
= start_sp
- sp
;
184 peak_atomic_max (&peak_stack
, current_stack
);
186 /* Add up heap and stack usage and compare it with the maximum value. */
187 peak_atomic_max (&peak_total
, heap
+ current_stack
);
189 /* Store the value only if we are writing to a file. */
192 uint32_t idx
= atomic_fetch_add_explicit (&buffer_cnt
, 1,
193 memory_order_relaxed
);
194 if (idx
+ 1 >= 2 * buffer_size
)
196 /* We try to reset the counter to the correct range. If
197 this fails because of another thread increasing the
198 counter it does not matter since that thread will take
199 care of the correction. */
200 uint32_t reset
= (idx
+ 1) % (2 * buffer_size
);
201 uint32_t expected
= idx
+ 1;
202 atomic_compare_exchange_weak (&buffer_cnt
, &expected
, reset
);
203 if (idx
>= 2 * buffer_size
)
206 assert (idx
< 2 * DEFAULT_BUFFER_SIZE
);
208 buffer
[idx
].heap
= current_heap
;
209 buffer
[idx
].stack
= current_stack
;
210 gettime (&buffer
[idx
]);
212 /* Write out buffer if it is full. */
213 if (idx
+ 1 == buffer_size
)
214 write (fd
, buffer
, buffer_size
* sizeof (struct entry
));
215 else if (idx
+ 1 == 2 * buffer_size
)
216 write (fd
, &buffer
[buffer_size
], buffer_size
* sizeof (struct entry
));
221 /* Interrupt handler. */
/* SIGPROF interrupt handler: sample the current stack pointer into
   the trace buffer.  No allocation is performed here, hence the
   zero-sized, header-less update.  */
static void
int_handler (int signo)
{
  /* Nothing gets allocated.  Just record the stack pointer position.  */
  update_data (NULL, 0, 0);
}
230 /* Find out whether this is the program we are supposed to profile.
231 For this the name in the variable `__progname' must match the one
232 given in the environment variable MEMUSAGE_PROG_NAME. If the variable
233 is not present every program assumes it should be profiling.
235 If this is the program open a file descriptor to the output file.
236 We will write to it whenever the buffer overflows. The name of the
237 output file is determined by the environment variable MEMUSAGE_OUTPUT.
239 If the environment variable MEMUSAGE_BUFFER_SIZE is set its numerical
240 value determines the size of the internal buffer. The number gives
241 the number of elements in the buffer. By setting the number to one
242 one effectively selects unbuffered operation.
244 If MEMUSAGE_NO_TIMER is not present an alarm handler is installed
245 which at the highest possible frequency records the stack pointer. */
249 const char *env
= getenv ("MEMUSAGE_PROG_NAME");
250 size_t prog_len
= strlen (__progname
);
253 mallocp
= (void *(*)(size_t))dlsym (RTLD_NEXT
, "malloc");
254 reallocp
= (void *(*)(void *, size_t))dlsym (RTLD_NEXT
, "realloc");
255 callocp
= (void *(*)(size_t, size_t))dlsym (RTLD_NEXT
, "calloc");
256 freep
= (void (*)(void *))dlsym (RTLD_NEXT
, "free");
258 mmapp
= (void *(*)(void *, size_t, int, int, int, off_t
))dlsym (RTLD_NEXT
,
261 (void *(*)(void *, size_t, int, int, int, off64_t
))dlsym (RTLD_NEXT
,
263 mremapp
= (void *(*)(void *, size_t, size_t, int, void *))dlsym (RTLD_NEXT
,
265 munmapp
= (int (*)(void *, size_t))dlsym (RTLD_NEXT
, "munmap");
270 /* Check for program name. */
271 size_t len
= strlen (env
);
272 if (len
> prog_len
|| strcmp (env
, &__progname
[prog_len
- len
]) != 0
273 || (prog_len
!= len
&& __progname
[prog_len
- len
- 1] != '/'))
277 /* Only open the file if it's really us. */
278 if (!not_me
&& fd
== -1)
283 start_sp
= __thread_stack_pointer ();
285 outname
= getenv ("MEMUSAGE_OUTPUT");
286 if (outname
!= NULL
&& outname
[0] != '\0'
287 && (access (outname
, R_OK
| W_OK
) == 0 || errno
== ENOENT
))
289 fd
= creat64 (outname
, 0666);
292 /* Don't do anything in future calls if we cannot write to
297 /* Write the first entry. */
301 /* Write it two times since we need the starting and end time. */
302 write (fd
, &first
, sizeof (first
));
303 write (fd
, &first
, sizeof (first
));
305 /* Determine the buffer size. We use the default if the
306 environment variable is not present. */
307 buffer_size
= DEFAULT_BUFFER_SIZE
;
308 const char *str_buffer_size
= getenv ("MEMUSAGE_BUFFER_SIZE");
309 if (str_buffer_size
!= NULL
)
311 buffer_size
= atoi (str_buffer_size
);
312 if (buffer_size
== 0 || buffer_size
> DEFAULT_BUFFER_SIZE
)
313 buffer_size
= DEFAULT_BUFFER_SIZE
;
316 /* Possibly enable timer-based stack pointer retrieval. */
317 if (getenv ("MEMUSAGE_NO_TIMER") == NULL
)
319 struct sigaction act
;
321 act
.sa_handler
= (sighandler_t
) &int_handler
;
322 act
.sa_flags
= SA_RESTART
;
323 sigfillset (&act
.sa_mask
);
325 if (sigaction (SIGPROF
, &act
, NULL
) >= 0)
327 struct itimerval timer
;
329 timer
.it_value
.tv_sec
= 0;
330 timer
.it_value
.tv_usec
= 1;
331 timer
.it_interval
= timer
.it_value
;
332 setitimer (ITIMER_PROF
, &timer
, NULL
);
338 if (!not_me
&& getenv ("MEMUSAGE_TRACE_MMAP") != NULL
)
344 /* Record the initial stack position. */
346 __attribute__ ((constructor
))
349 start_sp
= __thread_stack_pointer ();
355 /* `malloc' replacement. We keep track of the memory usage if this is the
360 struct header
*result
= NULL
;
362 /* Determine real implementation if not already happened. */
363 if (__glibc_unlikely (initialized
<= 0))
365 if (initialized
== -1)
371 /* If this is not the correct program just use the normal function. */
373 return (*mallocp
)(len
);
375 /* Keep track of number of calls. */
376 atomic_fetch_add_explicit (&calls
[idx_malloc
], 1, memory_order_relaxed
);
377 /* Keep track of total memory consumption for `malloc'. */
378 atomic_fetch_add_explicit (&total
[idx_malloc
], len
, memory_order_relaxed
);
379 /* Keep track of total memory requirement. */
380 atomic_fetch_add_explicit (&grand_total
, len
, memory_order_relaxed
);
381 /* Remember the size of the request. */
383 atomic_fetch_add_explicit (&histogram
[len
/ 16], 1, memory_order_relaxed
);
385 atomic_fetch_add_explicit (&large
, 1, memory_order_relaxed
);
386 /* Total number of calls of any of the functions. */
387 atomic_fetch_add_explicit (&calls_total
, 1, memory_order_relaxed
);
389 /* Do the real work. */
390 result
= (struct header
*) (*mallocp
)(len
+ sizeof (struct header
));
393 atomic_fetch_add_explicit (&failed
[idx_malloc
], 1,
394 memory_order_relaxed
);
398 /* Update the allocation data and write out the records if necessary. */
399 update_data (result
, len
, 0);
401 /* Return the pointer to the user buffer. */
402 return (void *) (result
+ 1);
406 /* `realloc' replacement. We keep track of the memory usage if this is the
409 realloc (void *old
, size_t len
)
411 struct header
*result
= NULL
;
415 /* Determine real implementation if not already happened. */
416 if (__glibc_unlikely (initialized
<= 0))
418 if (initialized
== -1)
424 /* If this is not the correct program just use the normal function. */
426 return (*reallocp
)(old
, len
);
430 /* This is really a `malloc' call. */
436 real
= ((struct header
*) old
) - 1;
437 if (real
->magic
!= MAGIC
)
438 /* This is no memory allocated here. */
439 return (*reallocp
)(old
, len
);
441 old_len
= real
->length
;
444 /* Keep track of number of calls. */
445 atomic_fetch_add_explicit (&calls
[idx_realloc
], 1, memory_order_relaxed
);
448 /* Keep track of total memory consumption for `realloc'. */
449 atomic_fetch_add_explicit (&total
[idx_realloc
], len
- old_len
,
450 memory_order_relaxed
);
451 /* Keep track of total memory requirement. */
452 atomic_fetch_add_explicit (&grand_total
, len
- old_len
,
453 memory_order_relaxed
);
456 if (len
== 0 && old
!= NULL
)
459 atomic_fetch_add_explicit (&realloc_free
, 1, memory_order_relaxed
);
460 /* Keep track of total memory freed using `free'. */
461 atomic_fetch_add_explicit (&total
[idx_free
], real
->length
,
462 memory_order_relaxed
);
464 /* Update the allocation data and write out the records if necessary. */
465 update_data (NULL
, 0, old_len
);
467 /* Do the real work. */
473 /* Remember the size of the request. */
475 atomic_fetch_add_explicit (&histogram
[len
/ 16], 1, memory_order_relaxed
);
477 atomic_fetch_add_explicit (&large
, 1, memory_order_relaxed
);
478 /* Total number of calls of any of the functions. */
479 atomic_fetch_add_explicit (&calls_total
, 1, memory_order_relaxed
);
481 /* Do the real work. */
482 result
= (struct header
*) (*reallocp
)(real
, len
+ sizeof (struct header
));
485 atomic_fetch_add_explicit (&failed
[idx_realloc
], 1,
486 memory_order_relaxed
);
490 /* Record whether the reduction/increase happened in place. */
492 atomic_fetch_add_explicit (&inplace
, 1, memory_order_relaxed
);
493 /* Was the buffer increased? */
495 atomic_fetch_add_explicit (&decreasing
, 1, memory_order_relaxed
);
497 /* Update the allocation data and write out the records if necessary. */
498 update_data (result
, len
, old_len
);
500 /* Return the pointer to the user buffer. */
501 return (void *) (result
+ 1);
505 /* `calloc' replacement. We keep track of the memory usage if this is the
508 calloc (size_t n
, size_t len
)
510 struct header
*result
;
511 size_t size
= n
* len
;
513 /* Determine real implementation if not already happened. */
514 if (__glibc_unlikely (initialized
<= 0))
516 if (initialized
== -1)
522 /* If this is not the correct program just use the normal function. */
524 return (*callocp
)(n
, len
);
526 /* Keep track of number of calls. */
527 atomic_fetch_add_explicit (&calls
[idx_calloc
], 1, memory_order_relaxed
);
528 /* Keep track of total memory consumption for `calloc'. */
529 atomic_fetch_add_explicit (&total
[idx_calloc
], size
, memory_order_relaxed
);
530 /* Keep track of total memory requirement. */
531 atomic_fetch_add_explicit (&grand_total
, size
, memory_order_relaxed
);
532 /* Remember the size of the request. */
534 atomic_fetch_add_explicit (&histogram
[size
/ 16], 1,
535 memory_order_relaxed
);
537 atomic_fetch_add_explicit (&large
, 1, memory_order_relaxed
);
538 /* Total number of calls of any of the functions. */
541 /* Do the real work. */
542 result
= (struct header
*) (*mallocp
)(size
+ sizeof (struct header
));
545 atomic_fetch_add_explicit (&failed
[idx_calloc
], 1,
546 memory_order_relaxed
);
550 /* Update the allocation data and write out the records if necessary. */
551 update_data (result
, size
, 0);
553 /* Do what `calloc' would have done and return the buffer to the caller. */
554 return memset (result
+ 1, '\0', size
);
558 /* `free' replacement. We keep track of the memory usage if this is the
565 /* Determine real implementation if not already happened. */
566 if (__glibc_unlikely (initialized
<= 0))
568 if (initialized
== -1)
574 /* If this is not the correct program just use the normal function. */
581 /* `free (NULL)' has no effect. */
584 atomic_fetch_add_explicit (&calls
[idx_free
], 1, memory_order_relaxed
);
588 /* Determine the pointer to the header. */
589 real
= ((struct header
*) ptr
) - 1;
590 if (real
->magic
!= MAGIC
)
592 /* This block wasn't allocated here. */
597 /* Keep track of number of calls. */
598 atomic_fetch_add_explicit (&calls
[idx_free
], 1, memory_order_relaxed
);
599 /* Keep track of total memory freed using `free'. */
600 atomic_fetch_add_explicit (&total
[idx_free
], real
->length
,
601 memory_order_relaxed
);
603 /* Update the allocation data and write out the records if necessary. */
604 update_data (NULL
, 0, real
->length
);
606 /* Do the real work. */
611 /* `mmap' replacement. We do not have to keep track of the size since
612 `munmap' will get it as a parameter. */
614 mmap (void *start
, size_t len
, int prot
, int flags
, int fd
, off_t offset
)
618 /* Determine real implementation if not already happened. */
619 if (__glibc_unlikely (initialized
<= 0))
621 if (initialized
== -1)
627 /* Always get a block. We don't need extra memory. */
628 result
= (*mmapp
)(start
, len
, prot
, flags
, fd
, offset
);
630 if (!not_me
&& trace_mmap
)
632 int idx
= (flags
& MAP_ANON
633 ? idx_mmap_a
: prot
& PROT_WRITE
? idx_mmap_w
: idx_mmap_r
);
635 /* Keep track of number of calls. */
636 atomic_fetch_add_explicit (&calls
[idx
], 1, memory_order_relaxed
);
637 /* Keep track of total memory consumption for `malloc'. */
638 atomic_fetch_add_explicit (&total
[idx
], len
, memory_order_relaxed
);
639 /* Keep track of total memory requirement. */
640 atomic_fetch_add_explicit (&grand_total
, len
, memory_order_relaxed
);
641 /* Remember the size of the request. */
643 atomic_fetch_add_explicit (&histogram
[len
/ 16], 1,
644 memory_order_relaxed
);
646 atomic_fetch_add_explicit (&large
, 1, memory_order_relaxed
);
647 /* Total number of calls of any of the functions. */
648 atomic_fetch_add_explicit (&calls_total
, 1, memory_order_relaxed
);
650 /* Check for failures. */
652 atomic_fetch_add_explicit (&failed
[idx
], 1, memory_order_relaxed
);
653 else if (idx
== idx_mmap_w
)
654 /* Update the allocation data and write out the records if
655 necessary. Note the first parameter is NULL which means
656 the size is not tracked. */
657 update_data (NULL
, len
, 0);
660 /* Return the pointer to the user buffer. */
665 /* `mmap64' replacement. We do not have to keep track of the size since
666 `munmap' will get it as a parameter. */
668 mmap64 (void *start
, size_t len
, int prot
, int flags
, int fd
, off64_t offset
)
672 /* Determine real implementation if not already happened. */
673 if (__glibc_unlikely (initialized
<= 0))
675 if (initialized
== -1)
681 /* Always get a block. We don't need extra memory. */
682 result
= (*mmap64p
)(start
, len
, prot
, flags
, fd
, offset
);
684 if (!not_me
&& trace_mmap
)
686 int idx
= (flags
& MAP_ANON
687 ? idx_mmap_a
: prot
& PROT_WRITE
? idx_mmap_w
: idx_mmap_r
);
689 /* Keep track of number of calls. */
690 atomic_fetch_add_explicit (&calls
[idx
], 1, memory_order_relaxed
);
691 /* Keep track of total memory consumption for `malloc'. */
692 atomic_fetch_add_explicit (&total
[idx
], len
, memory_order_relaxed
);
693 /* Keep track of total memory requirement. */
694 atomic_fetch_add_explicit (&grand_total
, len
, memory_order_relaxed
);
695 /* Remember the size of the request. */
697 atomic_fetch_add_explicit (&histogram
[len
/ 16], 1,
698 memory_order_relaxed
);
700 atomic_fetch_add_explicit (&large
, 1, memory_order_relaxed
);
701 /* Total number of calls of any of the functions. */
702 atomic_fetch_add_explicit (&calls_total
, 1, memory_order_relaxed
);
704 /* Check for failures. */
706 atomic_fetch_add_explicit (&failed
[idx
], 1, memory_order_relaxed
);
707 else if (idx
== idx_mmap_w
)
708 /* Update the allocation data and write out the records if
709 necessary. Note the first parameter is NULL which means
710 the size is not tracked. */
711 update_data (NULL
, len
, 0);
714 /* Return the pointer to the user buffer. */
719 /* `mremap' replacement. We do not have to keep track of the size since
720 `munmap' will get it as a parameter. */
722 mremap (void *start
, size_t old_len
, size_t len
, int flags
, ...)
727 va_start (ap
, flags
);
728 void *newaddr
= (flags
& MREMAP_FIXED
) ? va_arg (ap
, void *) : NULL
;
731 /* Determine real implementation if not already happened. */
732 if (__glibc_unlikely (initialized
<= 0))
734 if (initialized
== -1)
740 /* Always get a block. We don't need extra memory. */
741 result
= (*mremapp
)(start
, old_len
, len
, flags
, newaddr
);
743 if (!not_me
&& trace_mmap
)
745 /* Keep track of number of calls. */
746 atomic_fetch_add_explicit (&calls
[idx_mremap
], 1, memory_order_relaxed
);
749 /* Keep track of total memory consumption for `malloc'. */
750 atomic_fetch_add_explicit (&total
[idx_mremap
], len
- old_len
,
751 memory_order_relaxed
);
752 /* Keep track of total memory requirement. */
753 atomic_fetch_add_explicit (&grand_total
, len
- old_len
,
754 memory_order_relaxed
);
756 /* Remember the size of the request. */
758 atomic_fetch_add_explicit (&histogram
[len
/ 16], 1,
759 memory_order_relaxed
);
761 atomic_fetch_add_explicit (&large
, 1, memory_order_relaxed
);
762 /* Total number of calls of any of the functions. */
763 atomic_fetch_add_explicit (&calls_total
, 1, memory_order_relaxed
);
765 /* Check for failures. */
767 atomic_fetch_add_explicit (&failed
[idx_mremap
], 1,
768 memory_order_relaxed
);
771 /* Record whether the reduction/increase happened in place. */
773 atomic_fetch_add_explicit (&inplace_mremap
, 1,
774 memory_order_relaxed
);
775 /* Was the buffer increased? */
777 atomic_fetch_add_explicit (&decreasing_mremap
, 1,
778 memory_order_relaxed
);
780 /* Update the allocation data and write out the records if
781 necessary. Note the first parameter is NULL which means
782 the size is not tracked. */
783 update_data (NULL
, len
, old_len
);
787 /* Return the pointer to the user buffer. */
792 /* `munmap' replacement. */
794 munmap (void *start
, size_t len
)
798 /* Determine real implementation if not already happened. */
799 if (__glibc_unlikely (initialized
<= 0))
801 if (initialized
== -1)
807 /* Do the real work. */
808 result
= (*munmapp
)(start
, len
);
810 if (!not_me
&& trace_mmap
)
812 /* Keep track of number of calls. */
813 atomic_fetch_add_explicit (&calls
[idx_munmap
], 1, memory_order_relaxed
);
815 if (__glibc_likely (result
== 0))
817 /* Keep track of total memory freed using `free'. */
818 atomic_fetch_add_explicit (&total
[idx_munmap
], len
,
819 memory_order_relaxed
);
821 /* Update the allocation data and write out the records if
823 update_data (NULL
, 0, len
);
826 atomic_fetch_add_explicit (&failed
[idx_munmap
], 1,
827 memory_order_relaxed
);
834 /* Write some statistics to standard error. */
836 __attribute__ ((destructor
))
840 unsigned long int maxcalls
;
842 /* If we haven't done anything here just return. */
846 /* If we should call any of the memory functions don't do any profiling. */
849 /* Finish the output file. */
852 /* Write the partially filled buffer. */
853 if (buffer_cnt
> buffer_size
)
854 write (fd
, buffer
+ buffer_size
,
855 (buffer_cnt
- buffer_size
) * sizeof (struct entry
));
857 write (fd
, buffer
, buffer_cnt
* sizeof (struct entry
));
859 /* Go back to the beginning of the file. We allocated two records
860 here when we opened the file. */
861 lseek (fd
, 0, SEEK_SET
);
862 /* Write out a record containing the total size. */
863 first
.stack
= peak_total
;
864 write (fd
, &first
, sizeof (struct entry
));
865 /* Write out another record containing the maximum for heap and
867 first
.heap
= peak_heap
;
868 first
.stack
= peak_stack
;
870 write (fd
, &first
, sizeof (struct entry
));
872 /* Close the file. */
877 /* Write a colorful statistic. */
878 fprintf (stderr
, "\n\
879 \e[01;32mMemory usage summary:\e[0;0m heap total: %llu, heap peak: %lu, stack peak: %lu\n\
880 \e[04;34m total calls total memory failed calls\e[0m\n\
881 \e[00;34m malloc|\e[0m %10lu %12llu %s%12lu\e[00;00m\n\
882 \e[00;34mrealloc|\e[0m %10lu %12llu %s%12lu\e[00;00m (nomove:%ld, dec:%ld, free:%ld)\n\
883 \e[00;34m calloc|\e[0m %10lu %12llu %s%12lu\e[00;00m\n\
884 \e[00;34m free|\e[0m %10lu %12llu\n",
885 (unsigned long long int) grand_total
, (unsigned long int) peak_heap
,
886 (unsigned long int) peak_stack
,
887 (unsigned long int) calls
[idx_malloc
],
888 (unsigned long long int) total
[idx_malloc
],
889 failed
[idx_malloc
] ? "\e[01;41m" : "",
890 (unsigned long int) failed
[idx_malloc
],
891 (unsigned long int) calls
[idx_realloc
],
892 (unsigned long long int) total
[idx_realloc
],
893 failed
[idx_realloc
] ? "\e[01;41m" : "",
894 (unsigned long int) failed
[idx_realloc
],
895 (unsigned long int) inplace
,
896 (unsigned long int) decreasing
,
897 (unsigned long int) realloc_free
,
898 (unsigned long int) calls
[idx_calloc
],
899 (unsigned long long int) total
[idx_calloc
],
900 failed
[idx_calloc
] ? "\e[01;41m" : "",
901 (unsigned long int) failed
[idx_calloc
],
902 (unsigned long int) calls
[idx_free
],
903 (unsigned long long int) total
[idx_free
]);
907 \e[00;34mmmap(r)|\e[0m %10lu %12llu %s%12lu\e[00;00m\n\
908 \e[00;34mmmap(w)|\e[0m %10lu %12llu %s%12lu\e[00;00m\n\
909 \e[00;34mmmap(a)|\e[0m %10lu %12llu %s%12lu\e[00;00m\n\
910 \e[00;34m mremap|\e[0m %10lu %12llu %s%12lu\e[00;00m (nomove: %ld, dec:%ld)\n\
911 \e[00;34m munmap|\e[0m %10lu %12llu %s%12lu\e[00;00m\n",
912 (unsigned long int) calls
[idx_mmap_r
],
913 (unsigned long long int) total
[idx_mmap_r
],
914 failed
[idx_mmap_r
] ? "\e[01;41m" : "",
915 (unsigned long int) failed
[idx_mmap_r
],
916 (unsigned long int) calls
[idx_mmap_w
],
917 (unsigned long long int) total
[idx_mmap_w
],
918 failed
[idx_mmap_w
] ? "\e[01;41m" : "",
919 (unsigned long int) failed
[idx_mmap_w
],
920 (unsigned long int) calls
[idx_mmap_a
],
921 (unsigned long long int) total
[idx_mmap_a
],
922 failed
[idx_mmap_a
] ? "\e[01;41m" : "",
923 (unsigned long int) failed
[idx_mmap_a
],
924 (unsigned long int) calls
[idx_mremap
],
925 (unsigned long long int) total
[idx_mremap
],
926 failed
[idx_mremap
] ? "\e[01;41m" : "",
927 (unsigned long int) failed
[idx_mremap
],
928 (unsigned long int) inplace_mremap
,
929 (unsigned long int) decreasing_mremap
,
930 (unsigned long int) calls
[idx_munmap
],
931 (unsigned long long int) total
[idx_munmap
],
932 failed
[idx_munmap
] ? "\e[01;41m" : "",
933 (unsigned long int) failed
[idx_munmap
]);
935 /* Write out a histogram of the sizes of the allocations. */
936 fprintf (stderr
, "\e[01;32mHistogram for block sizes:\e[0;0m\n");
938 /* Determine the maximum of all calls for each size range. */
940 for (cnt
= 0; cnt
< 65536; cnt
+= 16)
941 if (histogram
[cnt
/ 16] > maxcalls
)
942 maxcalls
= histogram
[cnt
/ 16];
944 for (cnt
= 0; cnt
< 65536; cnt
+= 16)
945 /* Only write out the nonzero entries. */
946 if (histogram
[cnt
/ 16] != 0)
948 percent
= (histogram
[cnt
/ 16] * 100) / calls_total
;
949 fprintf (stderr
, "%5d-%-5d%12lu ", cnt
, cnt
+ 15,
950 (unsigned long int) histogram
[cnt
/ 16]);
952 fputs (" <1% \e[41;37m", stderr
);
954 fprintf (stderr
, "%3d%% \e[41;37m", percent
);
956 /* Draw a bar with a length corresponding to the current
958 percent
= (histogram
[cnt
/ 16] * 50) / maxcalls
;
959 while (percent
-- > 0)
961 fputs ("\e[0;0m\n", stderr
);
966 percent
= (large
* 100) / calls_total
;
967 fprintf (stderr
, " large %12lu ", (unsigned long int) large
);
969 fputs (" <1% \e[41;37m", stderr
);
971 fprintf (stderr
, "%3d%% \e[41;37m", percent
);
972 percent
= (large
* 50) / maxcalls
;
973 while (percent
-- > 0)
975 fputs ("\e[0;0m\n", stderr
);
978 /* Any following malloc/free etc. calls should generate statistics again,
979 because otherwise freeing something that has been malloced before
980 this destructor (including struct header in front of it) wouldn't
981 be properly freed. */