/*@ S-nail - a mail user agent derived from Berkeley Mail.
 *
 * Copyright (c) 2000-2004 Gunnar Ritter, Freiburg i. Br., Germany.
 * Copyright (c) 2012 - 2016 Steffen (Daode) Nurpmeso <steffen@sdaoden.eu>.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef HAVE_AMALGAMATION
#ifdef HAVE_MEMORY_DEBUG
CTA(sizeof(char) == sizeof(ui8_t));

# define _HOPE_SIZE (2 * 8 * sizeof(char))
# define _HOPE_SET(C) \
   union a_mem_ptr __xl, __xu;\
   struct a_mem_chunk *__xc;\
   __xl.p_ui8p[0]=0xDE; __xl.p_ui8p[1]=0xAA;\
   __xl.p_ui8p[2]=0x55; __xl.p_ui8p[3]=0xAD;\
   __xl.p_ui8p[4]=0xBE; __xl.p_ui8p[5]=0x55;\
   __xl.p_ui8p[6]=0xAA; __xl.p_ui8p[7]=0xEF;\
   __xu.p_ui8p += __xc->mc_size - 8;\
   __xu.p_ui8p[0]=0xDE; __xu.p_ui8p[1]=0xAA;\
   __xu.p_ui8p[2]=0x55; __xu.p_ui8p[3]=0xAD;\
   __xu.p_ui8p[4]=0xBE; __xu.p_ui8p[5]=0x55;\
   __xu.p_ui8p[6]=0xAA; __xu.p_ui8p[7]=0xEF;\

# define _HOPE_GET_TRACE(C,BAD) \

# define _HOPE_GET(C,BAD) \
   union a_mem_ptr __xl, __xu;\
   struct a_mem_chunk *__xc;\
   (C).p_cp = __xl.p_cp;\
   if (__xl.p_ui8p[0] != 0xDE) __i |= 1<<0;\
   if (__xl.p_ui8p[1] != 0xAA) __i |= 1<<1;\
   if (__xl.p_ui8p[2] != 0x55) __i |= 1<<2;\
   if (__xl.p_ui8p[3] != 0xAD) __i |= 1<<3;\
   if (__xl.p_ui8p[4] != 0xBE) __i |= 1<<4;\
   if (__xl.p_ui8p[5] != 0x55) __i |= 1<<5;\
   if (__xl.p_ui8p[6] != 0xAA) __i |= 1<<6;\
   if (__xl.p_ui8p[7] != 0xEF) __i |= 1<<7;\
      n_alert("%p: corrupt lower canary: 0x%02X: %s, line %d",\
         __xl.p_p, __i, mdbg_file, mdbg_line);\
   __xu.p_ui8p += __xc->mc_size - 8;\
   if (__xu.p_ui8p[0] != 0xDE) __i |= 1<<0;\
   if (__xu.p_ui8p[1] != 0xAA) __i |= 1<<1;\
   if (__xu.p_ui8p[2] != 0x55) __i |= 1<<2;\
   if (__xu.p_ui8p[3] != 0xAD) __i |= 1<<3;\
   if (__xu.p_ui8p[4] != 0xBE) __i |= 1<<4;\
   if (__xu.p_ui8p[5] != 0x55) __i |= 1<<5;\
   if (__xu.p_ui8p[6] != 0xAA) __i |= 1<<6;\
   if (__xu.p_ui8p[7] != 0xEF) __i |= 1<<7;\
      n_alert("%p: corrupt upper canary: 0x%02X: %s, line %d",\
         __xl.p_p, __i, mdbg_file, mdbg_line);\
      n_alert(" ..canary last seen: %s, line %" PRIu16 "",\
         __xc->mc_file, __xc->mc_line);
#endif /* HAVE_MEMORY_DEBUG */
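
/* Rough layout sketch: with HAVE_MEMORY_DEBUG a smalloc()ed region carries a
 * struct a_mem_chunk tracking header plus 8-byte canaries around the user
 * bytes,
 *
 *    [struct a_mem_chunk][DE AA 55 AD BE 55 AA EF][user bytes][DE AA .. EF]
 *
 * .mc_size covers the entire region, which is why the upper canary sits at
 * .mc_size - 8 and _HOPE_SIZE accounts for 2 * 8 bytes */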
#ifdef HAVE_MEMORY_DEBUG
   struct a_mem_chunk *mc_prev;
   struct a_mem_chunk *mc_next;

   struct a_mem_chunk *p_c;
#endif /* HAVE_MEMORY_DEBUG */

/*
 * String dope -- this is a temporary left over
 */

/* In debug mode the "string dope" allocations are enwrapped in canaries, just
 * as we do with our normal memory allocator */
#ifdef HAVE_MEMORY_DEBUG
# define _SHOPE_SIZE (2u * 8 * sizeof(char) + sizeof(struct schunk))

CTA(sizeof(char) == sizeof(ui8_t));
#endif /* HAVE_MEMORY_DEBUG */
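
/* Sketch: in debug mode each string dope chunk carries a struct schunk header
 * and 8-byte canaries (the same DE AA 55 AD BE 55 AA EF pattern as above) on
 * both sides of the .usr_size user bytes; .full_size is the total caster
 * advance.  _salloc_bcheck() below finds the upper canary at 8 + .usr_size
 * bytes past the lower one */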
#define SALIGN (sizeof(union __align__) - 1)

CTA(ISPOW2(SALIGN + 1));

struct b_base {
   struct buffer *_next;
   char *_bot;       /* For spreserve() */
   char *_relax;     /* If !NULL, used by srelax() instead of ._bot */
   char *_max;       /* Max usable byte */
   char *_caster;    /* NULL if full */
};

/* Single instance builtin buffer. Room for anything, most of the time */
struct b_bltin {
   struct b_base b_base;
   char b_buf[SBUFFER_BUILTIN - sizeof(struct b_base)];
};
#define SBLTIN_SIZE SIZEOF_FIELD(struct b_bltin, b_buf)
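
/* Sketch of how a buffer serves salloc() requests: memory is only ever "cast"
 * off by advancing ._caster, there is no per-chunk free(),
 *
 *    b_buf[ ._bot ...used... ._caster ......free...... ._max ]
 *
 * sreset() rewinds ._caster to ._bot, srelax() to ._relax, and a NULL
 * ._caster marks the buffer as full */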
/* Dynamically allocated buffers to overcome shortage, always released again
 * once the command loop ticks */
struct b_dyn {
   struct b_base b_base;
   char b_buf[SBUFFER_SIZE - sizeof(struct b_base)];
};
#define SDYN_SIZE SIZEOF_FIELD(struct b_dyn, b_buf)

/* The multiplexer of the several real b_* */
struct buffer {
   struct b_base b;
   char b_buf[VFIELD_SIZE(SALIGN + 1)];
};

/* Requests that exceed SDYN_SIZE-1 and thus cannot be handled by string dope
 * are always served by the normal memory allocator (which panics if memory
 * cannot be served). Note such an allocation has not yet occurred, it is only
 * included as a security fallback bypass */
struct hugebuf {
   struct hugebuf *hb_next;
   char hb_buf[VFIELD_SIZE(SALIGN + 1)];
};
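
/* Such a hugebuf is obtained via smalloc(), chained into _huge_list through
 * .hb_next, and only torn down again as a whole by sreset() */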
#ifdef HAVE_MEMORY_DEBUG
static size_t a_mem_aall, a_mem_acur, a_mem_amax,
   a_mem_mall, a_mem_mcur, a_mem_mmax;

static struct a_mem_chunk *a_mem_list, *a_mem_free;

/*
 * String dope -- this is a temporary left over
 */

static struct b_bltin _builtin_buf;
static struct buffer *_buf_head, *_buf_list, *_buf_server, *_buf_relax;
static size_t _relax_recur_no;
static struct hugebuf *_huge_list;
#ifdef HAVE_MEMORY_DEBUG
static size_t _all_cnt, _all_cycnt, _all_cycnt_max,
   _all_size, _all_cysize, _all_cysize_max, _all_min,
   _all_bufcnt, _all_cybufcnt, _all_cybufcnt_max,
   _all_resetreqs, _all_resets;
/* sreset() / srelax() release a buffer, check the canaries of all chunks */
#ifdef HAVE_MEMORY_DEBUG
static void _salloc_bcheck(struct buffer *b);

#ifdef HAVE_MEMORY_DEBUG
_salloc_bcheck(struct buffer *b)
   pmax.cp = (b->b._caster == NULL) ? b->b._max : b->b._caster;

   while (pp.cp < pmax.cp) {
      pp.cp += c->full_size;

      if (x.ui8p[0] != 0xDE) i |= 1<<0;
      if (x.ui8p[1] != 0xAA) i |= 1<<1;
      if (x.ui8p[2] != 0x55) i |= 1<<2;
      if (x.ui8p[3] != 0xAD) i |= 1<<3;
      if (x.ui8p[4] != 0xBE) i |= 1<<4;
      if (x.ui8p[5] != 0x55) i |= 1<<5;
      if (x.ui8p[6] != 0xAA) i |= 1<<6;
      if (x.ui8p[7] != 0xEF) i |= 1<<7;
         n_alert("sdope %p: corrupt lower canary: 0x%02X, size %u: %s, line %u",
            ux, i, c->usr_size, c->file, c->line);

      x.cp += 8 + c->usr_size;
      if (x.ui8p[0] != 0xDE) i |= 1<<0;
      if (x.ui8p[1] != 0xAA) i |= 1<<1;
      if (x.ui8p[2] != 0x55) i |= 1<<2;
      if (x.ui8p[3] != 0xAD) i |= 1<<3;
      if (x.ui8p[4] != 0xBE) i |= 1<<4;
      if (x.ui8p[5] != 0x55) i |= 1<<5;
      if (x.ui8p[6] != 0xAA) i |= 1<<6;
      if (x.ui8p[7] != 0xEF) i |= 1<<7;
         n_alert("sdope %p: corrupt upper canary: 0x%02X, size %u: %s, line %u",
            ux, i, c->usr_size, c->file, c->line);
#endif /* HAVE_MEMORY_DEBUG */
#ifndef HAVE_MEMORY_DEBUG
smalloc(size_t s SMALLOC_DEBUG_ARGS)
   if ((rv = malloc(s)) == NULL)
      n_panic(_("no memory"));

srealloc(void *v, size_t s SMALLOC_DEBUG_ARGS)
   else if ((rv = realloc(v, s)) == NULL)
      n_panic(_("no memory"));

scalloc(size_t nmemb, size_t size SMALLOC_DEBUG_ARGS)
   if ((rv = calloc(nmemb, size)) == NULL)
      n_panic(_("no memory"));

#else /* !HAVE_MEMORY_DEBUG */
(smalloc)(size_t s SMALLOC_DEBUG_ARGS)
   if (s > UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE)
      n_panic("smalloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   s += sizeof(struct a_mem_chunk) + _HOPE_SIZE;

   if ((p.p_p = (malloc)(s)) == NULL)
      n_panic(_("no memory"));
   p.p_c->mc_prev = NULL;
   if ((p.p_c->mc_next = a_mem_list) != NULL)
      a_mem_list->mc_prev = p.p_c;
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_size = (ui32_t)s;

   a_mem_list = p.p_c++;

   a_mem_amax = MAX(a_mem_amax, a_mem_acur);
   a_mem_mmax = MAX(a_mem_mmax, a_mem_mcur);
(srealloc)(void *v, size_t s SMALLOC_DEBUG_ARGS)
   if ((p.p_p = v) == NULL) {
      p.p_p = (smalloc)(s, mdbg_file, mdbg_line);

   if (p.p_c->mc_isfree) {
      n_err("srealloc(): region freed! At %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);

   if (p.p_c == a_mem_list)
      a_mem_list = p.p_c->mc_next;
      p.p_c->mc_prev->mc_next = p.p_c->mc_next;
   if (p.p_c->mc_next != NULL)
      p.p_c->mc_next->mc_prev = p.p_c->mc_prev;

   a_mem_mcur -= p.p_c->mc_size;

   if (s > UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE)
      n_panic("srealloc(): allocation too large: %s, line %d",
         mdbg_file, mdbg_line);
   s += sizeof(struct a_mem_chunk) + _HOPE_SIZE;

   if ((p.p_p = (realloc)(p.p_c, s)) == NULL)
      n_panic(_("no memory"));
   p.p_c->mc_prev = NULL;
   if ((p.p_c->mc_next = a_mem_list) != NULL)
      a_mem_list->mc_prev = p.p_c;
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_size = (ui32_t)s;
   a_mem_list = p.p_c++;

   a_mem_amax = MAX(a_mem_amax, a_mem_acur);
   a_mem_mmax = MAX(a_mem_mmax, a_mem_mcur);
(scalloc)(size_t nmemb, size_t size SMALLOC_DEBUG_ARGS)
   if (size > UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE)
      n_panic("scalloc(): allocation size too large: %s, line %d",
         mdbg_file, mdbg_line);
   if ((UI32_MAX - sizeof(struct a_mem_chunk) - _HOPE_SIZE) / nmemb < size)
      n_panic("scalloc(): allocation count too large: %s, line %d",
         mdbg_file, mdbg_line);

   size += sizeof(struct a_mem_chunk) + _HOPE_SIZE;

   if ((p.p_p = (malloc)(size)) == NULL)
      n_panic(_("no memory"));
   memset(p.p_p, 0, size);
   p.p_c->mc_prev = NULL;
   if ((p.p_c->mc_next = a_mem_list) != NULL)
      a_mem_list->mc_prev = p.p_c;
   p.p_c->mc_file = mdbg_file;
   p.p_c->mc_line = (ui16_t)mdbg_line;
   p.p_c->mc_isfree = FAL0;
   p.p_c->mc_size = (ui32_t)size;
   a_mem_list = p.p_c++;

   a_mem_amax = MAX(a_mem_amax, a_mem_acur);
   a_mem_mmax = MAX(a_mem_mmax, a_mem_mcur);
(sfree)(void *v SMALLOC_DEBUG_ARGS)
   if ((p.p_p = v) == NULL) {
      n_err("sfree(NULL) from %s, line %d\n", mdbg_file, mdbg_line);

   if (p.p_c->mc_isfree) {
      n_err("sfree(): double-free avoided at %s, line %d\n"
         "\tLast seen: %s, line %" PRIu16 "\n",
         mdbg_file, mdbg_line, p.p_c->mc_file, p.p_c->mc_line);

   if (p.p_c == a_mem_list)
      a_mem_list = p.p_c->mc_next;
      p.p_c->mc_prev->mc_next = p.p_c->mc_next;
   if (p.p_c->mc_next != NULL)
      p.p_c->mc_next->mc_prev = p.p_c->mc_prev;

   p.p_c->mc_isfree = TRU1;
   /* Trash contents (also see [21c05f8]) */
   memset(v, 0377, p.p_c->mc_size - sizeof(struct a_mem_chunk) - _HOPE_SIZE);

   a_mem_mcur -= p.p_c->mc_size;

   if (options & (OPT_DEBUG | OPT_MEMDEBUG)) {
      p.p_c->mc_next = a_mem_free;

   for (p.p_c = a_mem_free; p.p_c != NULL;) {
      p.p_c = p.p_c->mc_next;

   if (options & (OPT_DEBUG | OPT_MEMDEBUG))
      n_err("memreset: freed %" PRIuZ " chunks/%" PRIuZ " bytes\n", c, s);
   /* For _HOPE_GET() */
   char const * const mdbg_file = "memtrace()";
   int const mdbg_line = -1;
   union a_mem_ptr p, xp;

   if ((fp = Ftmp(NULL, "memtr", OF_RDWR | OF_UNLINK | OF_REGISTER)) == NULL) {
      n_perr("tmpfile", 0);

   fprintf(fp, "Memory statistics:\n"
      " Count cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n"
      " Bytes cur/peek/all: %7" PRIuZ "/%7" PRIuZ "/%10" PRIuZ "\n\n",
      a_mem_acur, a_mem_amax, a_mem_aall, a_mem_mcur, a_mem_mmax, a_mem_mall);

   fprintf(fp, "Currently allocated memory chunks:\n");
   for (lines = 0, p.p_c = a_mem_list; p.p_c != NULL;
         ++lines, p.p_c = p.p_c->mc_next) {
      _HOPE_GET_TRACE(xp, isbad);
      fprintf(fp, "%s%p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
         (isbad ? "! CANARY ERROR: " : ""), xp.p_p,
         (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)), p.p_c->mc_file,

   if (options & (OPT_DEBUG | OPT_MEMDEBUG)) {
      fprintf(fp, "sfree()d memory chunks awaiting free():\n");
      for (p.p_c = a_mem_free; p.p_c != NULL; ++lines, p.p_c = p.p_c->mc_next) {
         _HOPE_GET_TRACE(xp, isbad);
         fprintf(fp, "%s%p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
            (isbad ? "! CANARY ERROR: " : ""), xp.p_p,
            (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)),
            p.p_c->mc_file, p.p_c->mc_line);

   page_or_print(fp, lines);
n__memcheck(char const *mdbg_file, int mdbg_line)
   union a_mem_ptr p, xp;
   bool_t anybad = FAL0, isbad;

   for (lines = 0, p.p_c = a_mem_list; p.p_c != NULL;
         ++lines, p.p_c = p.p_c->mc_next) {
      _HOPE_GET_TRACE(xp, isbad);
         "! CANARY ERROR: %p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
         xp.p_p, (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)),
         p.p_c->mc_file, p.p_c->mc_line);

   if (options & (OPT_DEBUG | OPT_MEMDEBUG)) {
      for (p.p_c = a_mem_free; p.p_c != NULL; ++lines, p.p_c = p.p_c->mc_next) {
         _HOPE_GET_TRACE(xp, isbad);
            "! CANARY ERROR: %p (%5" PRIuZ " bytes): %s, line %" PRIu16 "\n",
            xp.p_p, (size_t)(p.p_c->mc_size - sizeof(struct a_mem_chunk)),
            p.p_c->mc_file, p.p_c->mc_line);
#endif /* HAVE_MEMORY_DEBUG */
(salloc)(size_t size SALLOC_DEBUG_ARGS)
#ifdef HAVE_MEMORY_DEBUG
   size_t orig_size = size;
   union {struct buffer *b; struct hugebuf *hb; char *cp;} u;

#ifdef HAVE_MEMORY_DEBUG
   _all_cycnt_max = MAX(_all_cycnt_max, _all_cycnt);
   _all_cysize_max = MAX(_all_cysize_max, _all_cysize);
   _all_min = (_all_max == 0) ? size : MIN(_all_min, size);
   _all_max = MAX(_all_max, size);
   _all_wast += size - orig_size;

   if (size >= SDYN_SIZE - 1)
      n_alert("salloc() of %" PRIuZ " bytes from %s, line %d",
         size, mdbg_file, mdbg_line);

   /* Huge allocations are special */
   if (UNLIKELY(size >= SDYN_SIZE - 1))

   /* Search for a buffer with enough free space to serve request */
   if ((u.b = _buf_server) != NULL)
      for (u.b = _buf_head; u.b != NULL; u.b = u.b->b._next) {
         if (u.b == _buf_server) {
            if (u.b == _buf_head && (u.b = _buf_head->b._next) != NULL) {

         if (PTRCMP(y, <=, z)) {
            /* Alignment is the one thing, the other is what is usually allocated,
             * and here about 40 bytes seems to be a good cut to avoid non-usable
             * non-NULL casters. However, because of _salloc_bcheck(), we may not
             * set ._caster to NULL because then it would check all chunks up to
             * ._max, which surely doesn't work; speed is no issue with DEBUG */
            u.b->b._caster = NDBG( PTRCMP(y + 42 + 16, >=, z) ? NULL : ) y;

   /* Need a new buffer */
   if (_buf_head == NULL) {
      struct b_bltin *b = &_builtin_buf;
      b->b_base._max = b->b_buf + SBLTIN_SIZE - 1;
      _buf_head = (struct buffer*)b;

#ifdef HAVE_MEMORY_DEBUG
      _all_cybufcnt_max = MAX(_all_cybufcnt_max, _all_cybufcnt);
      u.b = smalloc(sizeof(struct b_dyn));
      u.b->b._max = u.b->b_buf + SDYN_SIZE - 1;
      if (_buf_list != NULL)
         _buf_list->b._next = u.b;
      _buf_server = _buf_list = u.b;

   u.b->b._caster = (u.b->b._bot = u.b->b_buf) + size;
   u.b->b._relax = NULL;

   /* Encapsulate user chunk in debug canaries */
#ifdef HAVE_MEMORY_DEBUG
   xc->file = mdbg_file;
   xc->line = mdbg_line;
   xc->usr_size = (ui16_t)orig_size;
   xc->full_size = (ui16_t)size;

   xl.ui8p[0]=0xDE; xl.ui8p[1]=0xAA; xl.ui8p[2]=0x55; xl.ui8p[3]=0xAD;
   xl.ui8p[4]=0xBE; xl.ui8p[5]=0x55; xl.ui8p[6]=0xAA; xl.ui8p[7]=0xEF;

   xu.ui8p[0]=0xDE; xu.ui8p[1]=0xAA; xu.ui8p[2]=0x55; xu.ui8p[3]=0xAD;
   xu.ui8p[4]=0xBE; xu.ui8p[5]=0x55; xu.ui8p[6]=0xAA; xu.ui8p[7]=0xEF;

   u.hb = smalloc(sizeof(*u.hb) - VFIELD_SIZEOF(struct hugebuf, hb_buf) +
   u.hb->hb_next = _huge_list;
(csalloc)(size_t nmemb, size_t size SALLOC_DEBUG_ARGS)
   vp = (salloc)(size SALLOC_DEBUG_ARGSCALL);
sreset(bool_t only_if_relaxed)
   struct buffer *blh, *bh;
#ifdef HAVE_MEMORY_DEBUG

   /* Reset relaxation after any jump is a MUST */
   if (_relax_recur_no > 0)

   if (only_if_relaxed && _relax_recur_no == 0)

#ifdef HAVE_MEMORY_DEBUG
   _all_cycnt = _all_cysize = 0;
   _all_cybufcnt = (_buf_head != NULL && _buf_head->b._next != NULL);

   /* Reset relaxation after jump */
   if (_relax_recur_no > 0) {
      assert(_relax_recur_no == 0);

   if ((bh = _buf_head) != NULL) {
         struct buffer *x = bh;

#ifdef HAVE_MEMORY_DEBUG
         /* Give away all buffers that are not covered by sreset().
          * _buf_head is builtin and thus cannot be free()d */
         if (blh != NULL && x->b._bot == x->b_buf) {
            x->b._caster = x->b._bot;
            DBG( memset(x->b._caster, 0377,
               PTR2SIZE(x->b._max - x->b._caster)); )
      } while (bh != NULL);

      _buf_server = _buf_head;

   while (_huge_list != NULL) {
      struct hugebuf *hb = _huge_list;
      _huge_list = hb->hb_next;
   if (_relax_recur_no++ == 0) {
      for (b = _buf_head; b != NULL; b = b->b._next)
         b->b._relax = b->b._caster;
      _buf_relax = _buf_server;
   assert(_relax_recur_no > 0);

   if (--_relax_recur_no == 0) {
      for (b = _buf_head; b != NULL; b = b->b._next) {
#ifdef HAVE_MEMORY_DEBUG
         b->b._caster = (b->b._relax != NULL) ? b->b._relax : b->b._bot;
      n_err("srelax_rele(): recursion >0!\n");

   /* The purpose of relaxation is only that it is possible to reset the
    * casters, *not* to give back memory to the system. We are presumably in
    * an iteration over all messages of a mailbox, and it'd be quite
    * counterproductive to give the system allocator a chance to waste time */
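
   /* E.g., a relaxation cycle is meant to wrap an iteration roughly like
    * (sketch only):
    *
    *    ..enter relaxation (bumps _relax_recur_no)..
    *    for (each message in the mailbox) {
    *       cp = salloc(n);   // temporary strings for this message only
    *       srelax();         // rewind casters to the ._relax watermark
    *    }
    *    srelax_rele();       // outermost release: back to ._relax or ._bot
    */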
   assert(_relax_recur_no > 0);

   if (_relax_recur_no == 1) {
      for (b = _buf_head; b != NULL; b = b->b._next) {
#ifdef HAVE_MEMORY_DEBUG
         b->b._caster = (b->b._relax != NULL) ? b->b._relax : b->b._bot;
         DBG( memset(b->b._caster, 0377, PTR2SIZE(b->b._max - b->b._caster)); )

   for (b = _buf_head; b != NULL; b = b->b._next)
      b->b._bot = b->b._caster;
#ifdef HAVE_MEMORY_DEBUG
   excess = (_all_cybufcnt_max * SDYN_SIZE) + SBLTIN_SIZE;
   excess = (excess >= _all_cysize_max) ? 0 : _all_cysize_max - excess;

   printf("String usage statistics (cycle means one sreset() cycle):\n"
      " Buffer allocs ever/max a time : %" PRIuZ "/%" PRIuZ "\n"
      " .. size of the builtin/dynamic: %" PRIuZ "/%" PRIuZ "\n"
      " Overall alloc count/bytes     : %" PRIuZ "/%" PRIuZ "\n"
      " .. bytes min/max/align wastage: %" PRIuZ "/%" PRIuZ "/%" PRIuZ "\n"
      " sreset() cycles               : %" PRIuZ " (%" PRIuZ " performed)\n"
      " Cycle max.: alloc count/bytes : %" PRIuZ "/%" PRIuZ "+%" PRIuZ "\n",
      _all_bufcnt, _all_cybufcnt_max,
      SBLTIN_SIZE, SDYN_SIZE,
      _all_min, _all_max, _all_wast,
      _all_resetreqs, _all_resets,
      _all_cycnt_max, _all_cysize_max, excess);
#endif /* HAVE_MEMORY_DEBUG */

#ifdef HAVE_MEMORY_DEBUG
# undef _HOPE_GET_TRACE